Example #1
File: views.py Project: mwiencek/mbspotify
def mapping():
    """Endpoint for getting mappings for a MusicBrainz entity.

    JSON parameters:
        mbid: MBID of the entity that you need to find a mapping for.

    Returns:
        List with mappings to a specified MBID.
    """
    mbid = request.json["mbid"]
    if not validate_uuid(mbid):
        raise BadRequest("Incorrect MBID (UUID).")

    conn = psycopg2.connect(current_app.config['PG_CONNECT'])
    cur = conn.cursor()

    cur.execute("SELECT spotify_uri "
                "FROM mapping "
                "WHERE is_deleted = FALSE AND mbid = %s",
                (mbid,))

    response = Response(
        json.dumps({
            "mbid": mbid,
            "mappings": [row[0] for row in cur.fetchall()],
        }),
        mimetype="application/json"
    )
    response.headers["Access-Control-Allow-Origin"] = "*"
    return response
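A minimal, self-contained sketch of the same pattern (a hand-built JSON Response plus a CORS header), with the database lookup replaced by a hard-coded stub so it runs on its own:

import json
from flask import Flask, Response, request

app = Flask(__name__)

@app.route("/mapping", methods=["POST"])
def mapping_stub():
    mbid = request.json["mbid"]
    mappings = ["spotify:track:example"]  # stand-in for the database lookup

    response = Response(
        json.dumps({"mbid": mbid, "mappings": mappings}),
        mimetype="application/json"
    )
    response.headers["Access-Control-Allow-Origin"] = "*"
    return response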
Example #2
def drip():
    """Drips data over a duration after an optional initial delay."""
    args = CaseInsensitiveDict(request.args.items())
    duration = float(args.get('duration', 2))
    numbytes = int(args.get('numbytes', 10))
    code = int(args.get('code', 200))
    pause = duration / numbytes

    delay = float(args.get('delay', 0))
    if delay > 0:
        time.sleep(delay)

    def generate_bytes():
        for i in xrange(numbytes):
            yield u"*".encode('utf-8')
            time.sleep(pause)

    response = Response(generate_bytes(), headers={
        "Content-Type": "application/octet-stream",
        "Content-Length": str(numbytes),
    })

    response.status_code = code

    return response
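The snippet above is Python 2 (xrange, implicit byte strings). A rough Python 3 equivalent of the same streaming idea, with the query-string handling dropped so the sketch stays self-contained:

import time
from flask import Flask, Response

app = Flask(__name__)

@app.route("/drip")
def drip_py3():
    numbytes, pause = 10, 0.2  # fixed values instead of request arguments

    def generate_bytes():
        for _ in range(numbytes):  # range() replaces Python 2's xrange()
            yield b"*"
            time.sleep(pause)

    return Response(generate_bytes(), headers={
        "Content-Type": "application/octet-stream",
        "Content-Length": str(numbytes),
    })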
Example #3
File: io.py Project: HengeSense/webed
def archive_download (chunk_size=256 * 1024):

    node_uuid = request.args.get ('node_uuid', None)
    assert node_uuid
    base = Q (Node.query).one (uuid=app.session_manager.anchor)
    assert base
    node = Q (base.subnodes).one (uuid=node_uuid)
    assert node

    archive_key = object_cache.make_key (node_uuid, 'archive', 'zip')
    content_val = object_cache.get_value (archive_key)

    if content_val:
        if request.args.get ('fetch', False):

            content_len = len (content_val)
            content_csz = chunk_size

            def next_chunk (length, size):
                for index in range (0, length, size):
                    yield content_val[index:index + size]

            response = Response (next_chunk (content_len, content_csz))
            response.headers ['Content-Length'] = content_len
            response.headers ['Content-Disposition'] = \
                'attachment;filename="%s [%s].zip"' % (
                    node.name.encode ('utf-8'), node.mime.replace ('/', '!'))
        else:
            response = JSON.encode (dict (success=True, name=node.name))
            object_cache.expire (archive_key, expiry=90) ## refresh
    else:
        response = JSON.encode (dict (success=True, name=node.name))
        object_cache.set_value (archive_key, compress (node), expiry=90) ##[s]

    return response
Example #4
def upload():
    file = request.files['file']
    if file:
        dirname = mkdtemp()

        inputFile = dirname + "/in.pdf"

        file.save(inputFile)

        args = [
            "gs",
            "-dNOPAUSE", "-dBATCH", "-dSAFER",
            "-sDEVICE=pdfwrite",
            "-sOutputFile=%stdout",
            "-c", ".setpdfwrite",
            "-f", inputFile
        ]

        process = subprocess.Popen(args, stdout=subprocess.PIPE)

        def stream_out():
            for c in iter(lambda: process.stdout.read(100), ''):
                yield c
            rmtree(dirname)

        resp = Response(stream_out(), mimetype='application/pdf')
        resp.headers['Content-Type'] = 'application/pdf'
        resp.headers['Content-Disposition'] = 'attachment; filename="' + add_suffix(file.filename) + '"'
        return resp

    return 'no file given'
Example #5
def get_log(log):
    path = "/log/" + log
    if not db.logs.exists(path):
        data = "No such log!"
    else:
        data = bz2.decompress(db.logs.get(path).read())
    if 'plugin' in request.args:
        plugin = request.args.get('plugin')
        data = extract_plugin_log(data, plugin)
        if 'diff' in request.args:
            header = data[:data.find('\n')]
            base = request.args.get('base')
            ticket_id = request.args.get('ticket')
            base_data = bz2.decompress(db.logs.get(request.args.get('diff')).read())
            base_data = extract_plugin_log(base_data, plugin)
            diff = difflib.unified_diff(base_data.split('\n'), data.split('\n'), base, "%s + #%s" % (base, ticket_id), n=0)
            data = '\n'.join(('' if item[0] == '@' else item)
                             for item in diff)
            if not data:
                data = "No change."
            data = header + "\n\n" + data

    if 'short' in request.args:
        response = Response(shorten(data), direct_passthrough=True)
    else:
        response = make_response(data)
    response.headers['Content-type'] = 'text/plain; charset=utf-8'
    return response
Example #6
def new_post():
    """
    Mattermost new post event handler
    """

    data = request.form

    if MATTERMOST_GIPHY_TOKEN.find(data['token']) == -1:
        print('Tokens did not match, it is possible that this request came from somewhere other than Mattermost')
        return 'OK'

    translate_text = data['text'][len(data['trigger_word']):]

    if len(translate_text) == 0:
        print("No translate text provided, not hitting Giphy")
        return 'OK'

    gif_url = giphy_translate(translate_text)

    if len(gif_url) == 0:
        print('No gif url found, not returning a post to Mattermost')
        return 'OK'

    resp_data = {}
    resp_data['text'] = gif_url
    resp_data['username'] = USERNAME
    resp_data['icon_url'] = ICON_URL

    resp = Response(content_type='application/json')
    resp.set_data(json.dumps(resp_data))

    return resp
Example #7
 def respond_csv():
     out = StringIO()
     writer = UnicodeWriter(out)
     tmp = 'attachment; filename=all_users.csv'
     res = Response(gen_csv(out, writer, write_user), mimetype='text/csv')
     res.headers['Content-Disposition'] = tmp
     return res
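The UnicodeWriter and gen_csv helpers are not shown. A self-contained Python 3 sketch of the same CSV-attachment pattern using only the standard library:

import csv
import io
from flask import Flask, Response

app = Flask(__name__)

@app.route("/all_users.csv")
def respond_csv_stub():
    out = io.StringIO()
    writer = csv.writer(out)
    writer.writerow(["id", "name"])
    writer.writerow([1, "alice"])  # hypothetical rows
    res = Response(out.getvalue(), mimetype='text/csv')
    res.headers['Content-Disposition'] = 'attachment; filename=all_users.csv'
    return res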
Example #8
    def database_item(database_id, item_id, suffix, session_id):
        """
        """

        range_header = request.headers.get("Range", None)

        if range_header:
            begin, end = http.parse_range_header(range_header).ranges[0]
            data, mimetype, total_length = provider.get_item(
                session_id, database_id, item_id, byte_range=(begin, end))
            begin, end = (begin or 0), (end or total_length)

            # Setup response
            response = Response(
                data, 206, mimetype=mimetype,
                direct_passthrough=not isinstance(data, basestring))
            response.headers["Content-Range"] = "bytes %d-%d/%d" % (
                begin, end - 1, total_length)
            response.headers["Content-Length"] = end - begin
        else:
            data, mimetype, total_length = provider.get_item(
                session_id, database_id, item_id)

            # Setup response
            response = Response(
                data, 200, mimetype=mimetype,
                direct_passthrough=not isinstance(data, basestring))
            response.headers["Content-Length"] = total_length

        return response
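A self-contained sketch of the same Range handling using werkzeug's parse_range_header, serving a fixed byte string instead of the provider object (which is not shown above):

from flask import Flask, Response, request
from werkzeug.http import parse_range_header

app = Flask(__name__)
PAYLOAD = b"0123456789" * 100  # stand-in for the item data

@app.route("/item")
def item_stub():
    total = len(PAYLOAD)
    range_header = request.headers.get("Range")
    ranges = parse_range_header(range_header) if range_header else None

    if ranges:
        begin, end = ranges.ranges[0]
        begin, end = begin or 0, end or total
        response = Response(PAYLOAD[begin:end], 206,
                            mimetype="application/octet-stream")
        response.headers["Content-Range"] = "bytes %d-%d/%d" % (begin, end - 1, total)
    else:
        response = Response(PAYLOAD, 200, mimetype="application/octet-stream")

    response.headers["Content-Length"] = str(len(response.get_data()))
    return response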
Example #9
def apply_response(*args, **kwargs):
    resp = Response(content)
    resp.headers["Server"] = "GoAhead-Webs"
    resp.headers["Content-type"] = "application/octet-stream"
    resp.headers["Content-Transfer-Encoding"] = "binary"
    resp.headers["Content-Disposition"] = "attachment; filename=\"Config.CFG\""
    return resp, 200
Example #10
def api_list_data(trip_name, date_string, place_name, sample_type):

    is_csv = False

    try:
        data_format = request.args.get("fmt")

        if data_format == "csv":
            is_csv = True
    except KeyError:
        pass

    data = psql_query("SELECT * FROM field_data WHERE dataset=%s AND date(tstamp)=(DATE %s) AND site=%s AND sensor=%s", (trip_name, date_string, place_name, sample_type))
    if is_csv:
        cursor = db_conn.cursor()
        cursor.execute("SELECT * FROM field_data LIMIT 0")

        column_names = [desc[0] for desc in cursor.description]

        # In case the requested csv is huge, return it bit by bit
        def stream_csv():
            yield ",".join(column_names) + "\n"
            for row in data:
                print(row)
                yield ",".join([str(item) if item is not None else "" for item in row]) + "\n"

        csv_response = Response(stream_csv(), mimetype="text/csv")
        # Set the file's download name
        csv_response.headers["Content-Disposition"] = 'inline; filename="data.csv"'

        return csv_response

    else:
        return jsonify(dataset=trip_name, date=date_string,
                       place=place_name, sample_type=sample_type, data=data)
Example #11
    def zip_analysis(self):
        def generator():
            z = zipstream.ZipFile(mode='w',
                                  compression=zipstream.ZIP_DEFLATED)

            # find all analysis files
            folder = self.analyses_path+'/'+self.name
            for root, dirnames, filenames in os.walk(folder):
                invisible_dirs = [d for d in dirnames if d[0] == '.']
                for d in invisible_dirs:
                    dirnames.remove(d)
                for filename in filenames:
                    if filename[0] == '.':
                        continue
                    if filename[-4:] == '.pyc':
                        continue

                    # add the file to zipstream
                    fullname = os.path.join(root, filename)
                    arcname = fullname.replace(self.analyses_path+'/', '')
                    z.write(fullname, arcname=arcname)

            # add requirements.txt if present
            if os.path.isfile(self.analyses_path+'/requirements.txt'):
                z.write(self.analyses_path+'/requirements.txt')

            for chunk in z:
                yield chunk

        response = Response(generator(), mimetype='application/zip')
        response.headers['Content-Disposition'] = \
            'attachment; filename='+self.name+'.zip'
        return response
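zipstream here is the third-party python-zipstream package, whose ZipFile can be iterated to produce the archive lazily. A trimmed-down, self-contained sketch of the same streaming-ZIP response:

import zipstream  # the python-zipstream package
from flask import Flask, Response

app = Flask(__name__)

@app.route("/export.zip")
def export_stub():
    def generator():
        z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
        z.write("requirements.txt")  # hypothetical file; any existing path works
        for chunk in z:              # chunks are produced as the iterator is consumed
            yield chunk

    response = Response(generator(), mimetype='application/zip')
    response.headers['Content-Disposition'] = 'attachment; filename=export.zip'
    return response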
Example #12
def hello():
    text = 'Hello from: {0}\n'.format(socket.gethostname())
    resp = Response(text, mimetype='text/plain')
    resp.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    resp.headers['Pragma'] = 'no-cache'
    resp.headers['Expires'] = 0
    return resp
Example #13
def download():
    form = AdvancedSearchForm()
    form.business_type.default = 'All Entities'
    if form.validate_on_submit():
        q_object = {
            'query': form.query.data,
            'query_limit': form.query_limit.data,
            'index_field': form.index_field.data,
            'active': form.active.data,
            'sort_by': form.sort_by.data,
            'sort_order': form.sort_order.data
        }
        try:
            q_object['start_date'] = datetime.strftime(form.start_date.data, '%Y-%m-%d')
            q_object['end_date'] = datetime.strftime(form.end_date.data, '%Y-%m-%d')
        except TypeError:
            q_object['start_date'] = date(year=1990, month=1, day=1)
            q_object['end_date'] = datetime.now()
        q_object['business_type'] = form.business_type.data
        results = query(q_object)
        file = StringIO()

        writer = csv.DictWriter(file, fieldnames=['name', 'id', 'origin date', 'status', 'type', 'street', 'city', 'state', 'zip'])
        writer.writeheader()
        for biz in results.all():
            row = {'name': biz.nm_name, 'id': biz.id_bus, 'origin date': biz.dt_origin, 'status': biz.status,
                   'type': biz.type, 'street': biz.street, 'city': biz.city, 'state': biz.state, 'zip': biz.zip}
            writer.writerow(row)
        file.seek(0)
        response = Response(file, content_type='text/csv')
        response.headers['Content-Disposition'] = 'attachment; filename=sots_search_results.csv'
        return response
Example #14
def api_get_all_meta():
    json_res = get_all_meta_json()
    ret = Response(json_res, mimetype='text/json')
    ret.content_encoding = 'utf-8'
    ret.headers.set("Cache-Control", "public, max-age=604800")

    return ret
Example #15
def download():
    table_name = request.args.get('table_name', None)
    crs = request.args.get('crs', None)

    if not crs:
        return Response("The crs argument is required.", 400)
    if not table_name:
        return Response("The table_name argument is required.", 400)

    # The file name is set to <table_name>__<crs>
    file_base = table_name+"__"+crs

    zip_file = os.path.join(config.download_folder, file_base+".zip")
    if not os.path.isfile(zip_file):
        return Response("ZIP file not found", 500)

    try:
        with open(zip_file, "rb") as f:
            zip_bin = f.read()
    except Exception as e:
        logger.error("Error during Shape download: "+str(e))
        return Response("Error during Shape download", 500)

    ret = Response(zip_bin, mimetype='application/zip')
    ret.headers["Content-Disposition"] = "attachment; filename={}".format(file_base+".zip")
    return ret
Example #16
def all():
    """Returns all data in CSV format.

    **Example request**:

    .. sourcecode:: http

       GET /all HTTP/1.1
       Host: example.com
       Accept: */*

    **Example response**:

    .. sourcecode:: http

       HTTP/1.1 200 OK
       Content-Disposition: filename="cooler.csv"
       Content-Type: text/csv; charset=utf-8

       maker,model,width,depth,height,heatsink_type,weight,price,shop_count,first_seen,fan_size,fan_thickness,fan_count,noise,noise_actual_min,noise_actual_max,rpm_min,rpm_max,power,cpu_temp_delta,power_temp_delta
       3Rsystem,iCEAGE 120,125.0,100.0,154.0,tower,590.0,,,2007-04-04 16:18:55,120,25,1,35,,,1002,1010,62,50.7,
       Zalman,ZM-LQ320,,,,tower,195.0,91000,244,2013-01-31 16:57:18,120,25,2,100,58,58,2042,2068,200,60.8,64.5

    """
    resp = Response(export_data(), mimetype='text/csv')
    resp.headers['Content-Disposition'] = 'filename="cooler.csv"'
    return resp
Example #17
File: server.py Project: seibert/ln
def get(series_name, id):
    '''Return just the value corresponding to a particular index number.
    Primarily used to fetch blobs.'''

    id = int(id)

    try:
        times, values, resume = storage_backend.get_data(series_name,
                                                         offset=id,
                                                         limit=1)
    except SeriesDoesNotExistError:
        return jsonify_with_status_code(404)

    if len(times) == 0:
        return jsonify_with_status_code(404)

    value = values[0]
    if isinstance(value, Blob):
        response = Response(mimetype=value.mimetype)
        response.set_data(value.get_bytes())
        return response
    elif hasattr(value, '__len__'):  # list-like
        return json.dumps(value)
    else:
        return str(value)
Example #18
File: web.py Project: 5up3rD4n1/spreads
def download_workflow(workflow, fname):
    """ Return a ZIP archive of the current workflow.

    Includes all files from the workflow folder as well as the workflow
    configuration as a YAML dump.
    """
    # Set proper file name for zip file
    if fname is None:
        return redirect(url_for('download_workflow', workflow=workflow,
                        fname="{0}.zip".format(workflow.path.stem)))

    zstream = workflow.bag.package_as_zipstream(compression=None)
    zstream_copy = copy.deepcopy(zstream)
    zipsize = sum(len(data) for data in zstream_copy)
    on_download_prepared.send(workflow)

    def zstream_wrapper():
        """ Wrapper around our zstream so we can emit a signal when all data
        has been streamed to the client.
        """
        for data in zstream:
            yield data
        on_download_finished.send()

    response = Response(zstream_wrapper(), mimetype='application/zip')
    response.headers['Content-length'] = int(zipsize)
    return response
Example #19
def buildResponse(content, secret, application=None):
	if isinstance(content, dict) == False:
		return content

	data = {}
	status = 200
	
	if 'status' in content:
		data = {'error': content['message']}
		status = content['status']
	else:
		data = content
	
	if application:
		data['liftpass-application'] = application	

	data['liftpass-time'] = round(time.time())
	data = extras.toJSON(data)

	response = Response(data, status, {'content-type':'application/json'})
	if secret:
		digest = hmac.new(secret, data.encode('utf-8'), hashlib.sha256).hexdigest()
		response.headers['liftpass-hash'] = digest

	response.status_code = status

	return response
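A small companion sketch showing how a caller might verify the liftpass-hash header; it mirrors the HMAC computation above (hmac.compare_digest replaces plain string comparison, and the secret is assumed to be bytes on Python 3):

import hashlib
import hmac

def verify_liftpass_hash(secret, body, received_digest):
    # Recompute the digest exactly as buildResponse() does, then compare safely.
    digest = hmac.new(secret, body.encode('utf-8'), hashlib.sha256).hexdigest()
    return hmac.compare_digest(digest, received_digest)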
Example #20
def export():
    print("request.form:", request.form)
    svg_xml = request.form.get("data", "Invalid data")
    filename = request.form.get("filename", "manhattan_plot_snp")
    response = Response(svg_xml, mimetype="image/svg+xml")
    response.headers["Content-Disposition"] = "attachment; filename=%s"%filename
    return response
Example #21
File: http.py Project: wonderpl/dolly-web
 def func(*args, **kwargs):
     resp = f(*args, **kwargs)
     try:
         # assume response instance
         resp.headers
     except AttributeError:
         # try wrapping string
         try:
             resp = Response(resp)
         except Exception:
             # give up
             return resp
     if headers:
         for header, value in headers.items():
             resp.headers.add(header, value)
     if '_nc' in request.args:
         resp.cache_control.no_cache = True
     else:
         if cache_private:
             resp.cache_control.private = True
         if cache_max_age:
             if not resp.cache_control.private:
                 resp.cache_control.public = True
             resp.cache_control.max_age = cache_max_age
             # Always add ETag to cached responses
             resp.add_etag()
             resp.make_conditional(request)
     return resp
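func() above is the inner wrapper of a caching decorator; headers, cache_private and cache_max_age come from the enclosing factory, which is not shown. A hypothetical, self-contained reconstruction of how such a factory might be wired up and applied (the real project may differ):

from functools import wraps
from flask import Flask, Response, request

app = Flask(__name__)

def cached(cache_max_age=None, cache_private=False, headers=None):
    def decorator(f):
        @wraps(f)
        def func(*args, **kwargs):
            resp = f(*args, **kwargs)
            if not isinstance(resp, Response):
                resp = Response(resp)
            for header, value in (headers or {}).items():
                resp.headers.add(header, value)
            if '_nc' in request.args:
                resp.cache_control.no_cache = True
            elif cache_max_age:
                resp.cache_control.private = cache_private
                resp.cache_control.public = not cache_private
                resp.cache_control.max_age = cache_max_age
                resp.add_etag()
                resp.make_conditional(request)
            return resp
        return func
    return decorator

@app.route("/cached")
@cached(cache_max_age=300)
def cached_view():
    return "cacheable body"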
Example #22
File: FlaskHello.py Project: aifa/python
def generateScriptAll(scriptType):
	fromI=request.args.get('from')
	toI=request.args.get('to')

	if scriptType == 'import':
		template='[Job{0}]\nCmdTyp			= T\nDossier			= \nSubmission		= \nUpdatestate     = Imported\nFillAPIDossiers = True\n\n\n\n'
	elif scriptType == 'publish':
		template='[Job{0}]\nCmdTyp			= T\nDossier			= \nSubmission		= \nUpdatestate     = Published\nFillAPIDossiers = True\n\n\n\n'
	elif scriptType == 'verify':
		template='[Job{0}]\nCmdTyp			= T\nDossier			= \nSubmission		= \nUpdatestate     = Verified\nFillAPIDossiers = True\n\n\n\n'
	elif scriptType == 'torrent':
		template='[Job{0}]\nCmdTyp			= T\nDossier			= \nSubmission		= \n\n\n\n'
	else:
		return 'unknown command: ' + scriptType

	result = ""
	fI = open('toImport.ini', 'w')

	i=1
	result = result + template.format(i)
		
	response = Response(result, mimetype="text/plain")
	response.headers['Content-Type'] = 'text/plain'
	response.headers['Content-Disposition']='attachment;filename='+scriptType+'.ini'
	return response
Example #23
def proxy(path=''):
    url = PROXY_URL + '/' + path
    host = request.host
    data = request.environ['body_copy']

    headers = clean_headers(request.headers, request=True)

    cookies = request.cookies
    kwargs = {
        'allow_redirects': False,
        'cookies': cookies,
        'headers': headers,
    }
    if data:
        kwargs['data'] = data

    response = requests.request(request.method, url, **kwargs)
    content_type = response.headers.get('content-type', 'text/plain')
    r = Response(response.content)
    for k, v in clean_headers(response.headers, request=False).items():
        r.headers[k] = v

    for c in response.cookies:
        r.set_cookie(c.name, c.value)

    r.status_code = response.status_code
    return r
Example #24
 def login(self, request):
     json_body = request.json
     try:
         user = json_body["user_id"]
         passwd = json_body["password"]
     except (TypeError, KeyError):
         self._logger.debug("Failed to read user from request.")
         return self.unauthorized()
     
     #if self.db.is_valid_local_user(passwd, user_name=user):
     if user in ("marcos", "lin", "linm") and passwd in ("secret", "angular", "hello"):
         basic_enc = base64.b64encode("%s:%s" % (user, passwd))
         auth_token = "Basic %s" % basic_enc
         user_info = {
             "user_id": user,
             "user_name": user,
             "auth_token": auth_token
         }
 
         resp = Response(json.dumps(user_info))
         resp.set_cookie(self.AUTH_HEADER, auth_token)
         return resp
     else:
         self._logger.debug("Username password not authenticated.")
         return self.unauthorized()
Example #25
    def run(self, args):
        city = args['city']
        gids = args['gid'].split(',')
        attributes = args['attribute'].split(',')

        json = ""
        for gid in gids:
            gidjson = ""
            for attribute in attributes:
                val = Session.attribute_for_gid(city, str(gid), attribute)
                property = utils.Property(attribute, '"{0}"'.format(val))
                if gidjson:
                    gidjson = "{0}, {1}".format(gidjson, property.geojson())
                else:
                    gidjson = property.geojson()
            gidjson = "{{ {0} }}".format(gidjson)

            if json:
                json = "{0}, {1}".format(json, gidjson)
            else:
                json = gidjson

        json = "[{0}]".format(json)

        resp = Response(json)
        resp.headers['Access-Control-Allow-Origin'] = '*'
        resp.headers['Content-Type'] = 'text/plain'

        return resp
Example #26
def get_token():
    response = {}
    subject = request.values.get('subject')
    if request.method == 'GET':
        message = "Unhandled method: '%s'" % request.method
        response["error"] = message
        ctk_status = 400
    elif request.method == 'POST':
        response = {
            "error": None,
            "groups": get_groups_file(groups_file),
            "subject": subject,
            "token": get_token_file(token_file)
        }
        ctk_status = 200
    else:
        message = "Unhandled method: '%s'" % request.method
        response["error"] = message
        ctk_status = 400
    # include _links part
    response["_links"] = [{"rel": "self", "href": "/get-token"}, ]
    js = json.dumps(response, indent=fgjson_indent)
    resp = Response(js, status=ctk_status, mimetype='application/json')
    resp.headers['Content-type'] = 'application/json'
    return resp
Example #27
def checktoken():
    response = {}
    token = request.values.get('token')
    if request.method == 'GET':
        message = "Unhandled method: '%s'" % request.method
        response["error"] = message
        ctk_status = 400
    elif request.method == 'POST':
        # response = {
        #    "token_status": "valid",
        #    # you may specify:
        #    #  portal_user - A portal user that can be mapped by
        #    #                fgapiserver_ptvmap.json map file
        #    #  portal_group - A portal group that can be mapped by
        #    #                 fgapiserver_ptvmap.json map file
        #    # "portal_user": fgapisrv_ptvdefusr
        #    "portal_group": "admin"
        # }
        response = {
            "error": None,
            "groups": get_groups_file(groups_file),
            "subject": get_subject_file(subject_file)
        }
        ctk_status = 200
    else:
        message = "Unhandled method: '%s'" % request.method
        response["error"] = message
        ctk_status = 400
    # include _links part
    response["_links"] = [{"rel": "self", "href": "/checktoken"}, ]
    js = json.dumps(response, indent=fgjson_indent)
    resp = Response(js, status=ctk_status, mimetype='application/json')
    resp.headers['Content-type'] = 'application/json'
    return resp
Example #28
def download_data():
    """Returns a zip file of CSV files, one for every month.
    """
    filenames = _write_files_and_return_names()
    zip_subdir = 'slate'
    zip_filename = '%s.zip' % zip_subdir

    # Open StringIO to grab in-memory ZIP contents
    s = StringIO.StringIO()
    zf = zipfile.ZipFile(s, "w")

    for fpath in filenames:
        # Calculate path for file in zip
        fdir, fname = os.path.split(fpath)
        zip_path = os.path.join(zip_subdir, fname)
        # Add file, at correct path
        zf.write(fpath, zip_path)

    zf.close()

    # Grab ZIP file from in-memory, make response with correct MIME-type
    resp = Response(s.getvalue(), mimetype='application/x-zip-compressed')
    content_disposition = 'attachment; filename=%s' % zip_filename
    resp.headers['Content-Disposition'] = content_disposition

    # Delete files from server before sending them over the wire.
    for f in filenames:
        os.remove(f)

    return resp
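The example is Python 2 (StringIO.StringIO); ZIP archives are binary, so a Python 3 version would use io.BytesIO. A minimal sketch, with the generated CSV files replaced by one hard-coded entry:

import io
import zipfile
from flask import Flask, Response

app = Flask(__name__)

@app.route("/download")
def download_data_py3():
    buf = io.BytesIO()  # in-memory binary buffer instead of StringIO
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("slate/2020-01.csv", "date,value\n2020-01-01,1\n")  # hypothetical content

    resp = Response(buf.getvalue(), mimetype='application/x-zip-compressed')
    resp.headers['Content-Disposition'] = 'attachment; filename=slate.zip'
    return resp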
Example #29
File: httpd.py Project: eriknj/challenge
def job_intake() :
    args = request.form.keys()[0]

    targets = []
    while len( args ) > 0 :
        scheme, netloc, args = get_next_url( args )
        target = {}
        target[ 'scheme' ] = scheme
        target[ 'netloc' ] = netloc
        target[ 'pending' ] = True
        targets.append( target )
    
    if len( job_ids ) == 0 :
        job_id = 1
    else :
        job_id = job_ids[ -1 ] + 1

    target_ndx = 1
    for target in targets :
        n = "scrape%d.%d" % ( job_id, target_ndx )
        target_ndx += 1 
        cmd = "%d %s %s" % ( job_id, target[ 'scheme' ], target[ 'netloc' ] )
        container = c.create_container( "eriknj/scrape", name=n, command=cmd )
        c.start( container, links=[ ( "httpd", "HTTPD" ) ] )
        target[ 'container' ] = container

    target_ls_by_job_id[ job_id ] = targets
    result_by_job_id[ job_id ] = []
    job_ids.append( job_id )

    response = Response( "Job accepted, ID assigned: %d\n" % job_id )
    response.result = str( job_id )
    response.result_code = 200

    return response
Example #30
File: sse.py Project: jeredding/wishbone
    def subscribe(self, destination=""):

        def consume():
            try:
                while self.loop():
                    try:
                        result = self.session_queues[destination][queue_id].get(timeout=self.keepalive_interval)
                    except:
                        if self.keepalive:
                            ev = ServerSentEvent(":keep-alive")
                            yield ev.encode()
                    else:
                        ev = ServerSentEvent(str(result))
                        yield ev.encode()
            except GeneratorExit:
                self.__deleteSessionQueue(destination, queue_id)
            except Exception:
                self.__deleteSessionQueue(destination, queue_id)

        def close():
            self.__deleteSessionQueue(destination, queue_id)

        queue_id = self.__addSessionQueue(destination)
        r = Response(consume(), mimetype="text/event-stream")
        r.call_on_close(close)
        return r
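ServerSentEvent and the session-queue plumbing are project internals. A bare-bones, self-contained sketch of a Server-Sent Events response with the same keep-alive idea:

import time
from flask import Flask, Response

app = Flask(__name__)

@app.route("/events")
def subscribe_stub():
    def consume():
        for n in range(5):
            yield "data: tick %d\n\n" % n  # minimal SSE framing: "data: ...", blank line
            time.sleep(1)
        yield ":keep-alive\n\n"            # comment line, ignored by EventSource clients

    return Response(consume(), mimetype="text/event-stream")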
Example #31
def index():
    if request.method == 'POST':
        resp_data = {
            "code": 'success',
            "data": {"imgName": None, "fileId": None}
        }
        # check if the post request has the file part
        if not os.path.exists(current_app.config['UPLOAD_BASE_FOLDER']):
            os.mkdir(current_app.config['UPLOAD_BASE_FOLDER'])
        if 'file' not in request.files:
            msg = 'No file part'
            resp_data['code'] = 'failure'
            resp_data['msg'] = msg
            resp_data.pop('data')
            return Response(
                json.dumps(resp_data, indent=4),
                content_type="application/json; charset=utf-8",
                status=400
            )
        file = request.files['file']
        # if the user does not select a file, the browser
        # submits an empty part without a filename
        if file.filename == '':
            msg = 'No selected file'
            resp_data['code'] = 'failure'
            resp_data['msg'] = msg
            resp_data.pop('data')
            return Response(
                json.dumps(resp_data, indent=4),
                content_type="application/json; charset=utf-8",
                status=400
            )
        if file and allowed_file(file.filename):
            original_name = file.filename.rsplit('/')[-1]  # original filename of the uploaded file
            file_type = secure_filename(file.filename).rsplit('.')[-1].lower()
            img_name = str(uuid.uuid4()).replace(
                '-', '')[:16] + '.' + file_type
            file_id = str(uuid.uuid1()).replace('-', '')[:10]
            file.save(
                os.path.join(current_app.config['UPLOAD_BASE_FOLDER'],
                            img_name))
            resp_data.update(
                {
                    "data":
                    {
                        "imgName": img_name,
                        "fileId": file_id,
                        "origName": original_name
                    }
                }
            )
            pic_bed = PicBed(
                img_id=file_id,
                orig_img_name=original_name,
                img_name=img_name
            )
            pic_bed.save()
            return Response(
                json.dumps(resp_data, indent=4),
                content_type="application/json; charset=utf-8"
            )
        else:
            resp_data['msg'] = 'error'
            return Response(
                json.dumps(resp_data, indent=4),
                content_type='application/json; charset=utf-8',
                status=400
            )
    return render_template('upload.html')
Example #32
 def swagger_ui():
     return Response(swagger_body, content_type="text/html")
Example #33
def authenticate():
    """Sends a 401 response that enables basic auth"""
    return Response(
        'Could not verify your access level for that URL.\n'
        'You have to login with proper credentials', 401,
        {'WWW-Authenticate': 'Basic realm="Login Required"'})
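authenticate() is usually paired with a decorator that challenges requests lacking valid credentials. A hypothetical companion sketch (check_auth is a stand-in credential check, not part of the original snippet):

from functools import wraps
from flask import request

def check_auth(username, password):
    # Stand-in credential check for the sketch.
    return username == "admin" and password == "secret"

def requires_auth(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or not check_auth(auth.username, auth.password):
            return authenticate()  # the 401 helper from the example above
        return f(*args, **kwargs)
    return decorated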
Example #34
def showlist():
    return Response(dumps(Post.find({"Category": "种子神奇宝贝"})))
Example #35
        def before_request():  # noqa
            """
            Parse and validate request data (query, form, header and body),
            and set it on `request.parsed_data`
            """
            # convert "/api/items/<int:id>/" to "/api/items/{id}/"
            subs = []
            for sub in str(request.url_rule).split('/'):
                if '<' in sub:
                    if ':' in sub:
                        start = sub.index(':') + 1
                    else:
                        start = 1
                    subs.append('{{{:s}}}'.format(sub[start:-1]))
                else:
                    subs.append(sub)
            path = '/'.join(subs)
            path_key = path + request.method.lower()

            if not self.app.debug and path_key in self.parsers:
                parsers = self.parsers[path_key]
                schemas = self.schemas[path_key]
            else:
                doc = None
                for spec in self.config['specs']:
                    apispec = self.get_apispecs(endpoint=spec['endpoint'])
                    if path in apispec['paths']:
                        if request.method.lower() in apispec['paths'][path]:
                            doc = apispec['paths'][path][
                                request.method.lower()]
                            break
                if not doc:
                    return

                parsers = defaultdict(RequestParser)
                schemas = defaultdict(
                    lambda: {'type': 'object', 'properties': defaultdict(dict)}
                )
                for param in doc.get('parameters', []):
                    location = self.SCHEMA_LOCATIONS[param['in']]
                    if location == 'json':  # load data from 'request.json'
                        schemas[location] = param['schema']
                    else:
                        name = param['name']
                        if location != 'path':
                            parsers[location].add_argument(
                                name,
                                type=self.SCHEMA_TYPES[
                                    param.get('type', None)],
                                required=param.get('required', False),
                                location=self.SCHEMA_LOCATIONS[
                                    param['in']],
                                store_missing=False)

                        for k in param:
                            if k != 'required':
                                schemas[
                                    location]['properties'][name][k] = param[k]

                    self.schemas[path_key] = schemas
                    self.parsers[path_key] = parsers

            parsed_data = {'path': request.view_args}
            for location in parsers.keys():
                parsed_data[location] = parsers[location].parse_args()
            if 'json' in schemas:
                parsed_data['json'] = request.json or {}
            for location, data in parsed_data.items():
                try:
                    jsonschema.validate(
                        data, schemas[location],
                        format_checker=self.format_checker)
                except jsonschema.ValidationError as e:
                    abort(Response(e.message, status=400))

            setattr(request, 'parsed_data', parsed_data)
Example #36
def not_found(exc):
    return Response('<h3>Not found</h3>'), 404
Example #37
 def decorated(*args, **kwargs):
     auth = request.headers.get('Authorization')
     if not auth or hashlib.sha1('plivo123').hexdigest() != auth :
         return Response(json.dumps({'msg':'Unauthorized.'}), status=401, mimetype='application/json')
     return f(*args, **kwargs)
Example #38
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
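gen() and VideoCamera are not shown. The usual shape of such a generator is to yield JPEG frames wrapped in multipart boundaries; a hypothetical sketch (get_frame() is assumed to return JPEG bytes):

def gen(camera):
    # Hypothetical frame generator for the multipart/x-mixed-replace response above.
    while True:
        frame = camera.get_frame()  # assumed to return one JPEG-encoded frame as bytes
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')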
Example #39
def autocomplete():
    query = card_list.query.all()
    cards = []
    for entry in query:
        cards.append(entry.card_name)
    return Response(json.dumps(cards), mimetype='application/json')
Example #40
def tasks():

    tasks = {}  # TODO

    return Response(response=json.dumps(tasks), mimetype="application/json")
Example #41
def create_businesses():
    """
    This endpoint creates a business record, from the POST data.
    It takes in a parameter list for a user as:
    :param
        JSON representation of the business

    :return: Http response 200
        id  urn:ons.gov.uk:id:business:001.234.56789
    """

    app.logger.info("businesses/create_business()")

    # First check that we have a valid JWT token; if we don't, send a 400 error with an authorisation failure
    if request.headers.get('authorization'):
        jwt_token = request.headers.get('authorization')
        if not validate_scope(jwt_token, 'ps.write'):
            res = Response(response="Invalid token/scope to access this Microservice Resource", status=400, mimetype="text/html")
            return res
    else:
        res = Response(response="Valid token/scope is required to access this Microservice Resource", status=400, mimetype="text/html")
        return res

    party_respondent = []

    json = request.json
    if json:
        response = make_response("")

        party_respondent.append(request.json)
        response.headers["location"] = "/respondents/"

        # Check that we have all the correct attributes in our json object.
        try:

            json["businessRef"]
            json["name"]
            json["addressLine1"]
            json["city"]
            json["postcode"]

        except KeyError:
            app.logger.warning("""Party Service POST did not contain the correct mandatory
                               parameters in its JSON payload: {}""".format(str(json)))
            res = Response(response="invalid input, object invalid", status=404, mimetype="text/html")
            return res

        if not validate_legal_status_code(json["legalStatus"]):
            app.logger.warning("""Party Service POST did not contain a valid legal status code in the legal status field.
                               Received: {}""".format(json['legalStatus']))
            res = Response(response="invalid status code, object invalid", status=404, mimetype="text/html")
            return res

        if not validate_phone_number(json["telephone"]):
            app.logger.warning("""Party Service POST did not contain a valid UK phone number in the telephone field.
                               Received: {}""".format(json['telephone']))
            res = Response(response="invalid phone number, object invalid", status=404, mimetype="text/html")
            return res

        try:

            new_business_urn = generate_urn('business')

            # create business
            new_business = Business(party_id=new_business_urn,
                                    business_ref=json["businessRef"],
                                    name=json["name"],
                                    trading_name=json["tradingName"],
                                    enterprise_name=json["enterpriseName"],
                                    contact_name=json["contactName"],
                                    address_line_1=json["addressLine1"],
                                    address_line_2=json["addressLine2"],
                                    address_line_3=json["addressLine3"],
                                    city=json["city"],
                                    postcode=json["postcode"],
                                    telephone=json["telephone"],
                                    employee_count=json["employeeCount"],
                                    facsimile=json["facsimile"],
                                    fulltime_count=json["fulltimeCount"],
                                    legal_status=json["legalStatus"],
                                    sic_2003=json["sic2003"],
                                    sic_2007=json["sic2007"],
                                    turnover=json["turnover"])

            db.session.add(new_business)
            db.session.flush()

            # commit the whole transaction
            db.session.commit()

        except:

            # rollback the whole transaction
            db.session.rollback()

            app.logger.error("DB exception: {}".format(sys.exc_info()[0]))
            response = Response(response="Error in the Party DB.", status=500, mimetype="text/html")
            return response

        collection_path = response.headers["location"] = "/businesses/" + str(new_business.id)
        etag = hashlib.sha1(collection_path).hexdigest()
        response.set_etag(etag)

        response.headers["id"] = "/businesses/" + str(new_business.id)
        return response, 201

    return jsonify({"message": "Please provide a valid Json object.",
                    "hint": "you may need to pass a content-type: application/json header"}), 400
Example #42
def video_feed():
    # a continuous response from the generator function
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Example #43
def authenticate():
    # 401 response
    return Response(
        'Could not verify your access level for that URL; You have to login with proper credentials',
        401, {'WWW-Authenticate': 'PlynxBasic realm="Login Required"'})
Example #44
def create_respondent():
    """
    This endpoint creates a respondent record, from the POST data.
    It takes in a parameter list for a user as:
    :param
        emailAddress
        firstName
        lastName
        telephone
        status [ CREATED | ACTIVE | SUSPENDED ]

    :return: Http response 200
        id  urn:ons.gov.uk:id:respondent:001.234.56789

    The email must be unique for this user.
    """

    app.logger.info("respondents/create_respondent()")

    # First check that we have a valid JWT token; if we don't, send a 400 error with an authorisation failure
    if request.headers.get('authorization'):
        jwt_token = request.headers.get('authorization')
        if not validate_scope(jwt_token, 'ps.write'):
            res = Response(response="Invalid token/scope to access this Microservice Resource", status=400, mimetype="text/html")
            return res
    else:
        res = Response(response="Valid token/scope is required to access this Microservice Resource", status=400, mimetype="text/html")
        return res

    party_respondent = []

    json = request.json
    if json:
        response = make_response("")

        party_respondent.append(request.json)
        response.headers["location"] = "/respondents/"

        # Check that we have all the correct attributes in our json object.
        try:
            json["emailAddress"]
            json["firstName"]
            json["lastName"]
            json["telephone"]
            json["status"]
            json["enrolmentCode"]

        except KeyError:
            app.logger.warning("""Party Service POST did not contain the correct mandatory
                               parameters in its JSON payload: {}""".format(str(json)))
            res = Response(response="invalid input, object invalid", status=404, mimetype="text/html")
            return res

        if not validate_status_code(json["status"]):
            app.logger.warning("""Party Service POST did not contain a valid status code in the status field. We
                               received: {}""".format(json['status']))
            res = Response(response="invalid status code, object invalid", status=404, mimetype="text/html")
            return res

        if not validate_phone_number(json["telephone"]):
            app.logger.warning("""Party Service POST did not contain a valid UK phone number in the telephone field. We
                               received: {}""".format(json['telephone']))
            res = Response(response="invalid phone number, object invalid", status=404, mimetype="text/html")
            return res

        try:

            # generate a new respondent urn
            new_respondent_urn = generate_urn('respondent')

            # get the case context for the iac
            survey_id, business_id = get_case_context(json["enrolmentCode"])

            if survey_id and business_id:

                # set the statuses
                if json["status"] == 'CREATED':
                    business_association_status = 'INACTIVE'
                    enrolment_status = 'PENDING'
                elif json["status"] == 'ACTIVE':
                    business_association_status = 'ACTIVE'
                    enrolment_status = 'ACTIVE'
                elif json["status"] == 'SUSPENDED':
                    business_association_status = 'INACTIVE'
                    enrolment_status = 'SUSPENDED'
                else:
                    business_association_status = 'INACTIVE'
                    enrolment_status = 'PENDING'

                # create respondent
                new_respondent = Respondent(party_id=new_respondent_urn,
                                            status=json["status"],
                                            email_address=json["emailAddress"],
                                            first_name=json["firstName"],
                                            last_name=json["lastName"],
                                            telephone=json["telephone"])
                db.session.add(new_respondent)
                db.session.flush()

                # create business association
                new_business_association = BusinessAssociation(business_id=business_id,
                                                               respondent_id=new_respondent.id,
                                                               status=business_association_status)
                db.session.add(new_business_association)
                db.session.flush()

                # create enrolment
                new_enrolment = Enrolment(business_association_id=new_business_association.id,
                                          survey_id=survey_id,
                                          status=enrolment_status)
                db.session.add(new_enrolment)

                # create enrolment invitation
                verification_token = str(uuid.uuid4())
                sms_verification_token = randint(0, 999999)
                new_enrolment_invitation = EnrolmentInvitation(respondent_id=new_respondent.id,
                                                               target_email=json["emailAddress"],
                                                               verification_token=verification_token,
                                                               sms_verification_token=sms_verification_token,
                                                               status='ACTIVE')

                db.session.add(new_enrolment_invitation)

                # TODO call notification service to send verification email

                # commit the whole transaction
                db.session.commit()

            else:

                app.logger.info("Could not establish case context for iac: {}".format(json["enrolmentCode"]))
                response = Response(response="Case context could not be established", status=404, mimetype="text/html")
                return response

        except:

            # rollback the whole transaction
            db.session.rollback()

            app.logger.error("DB exception: {}".format(sys.exc_info()[0]))
            response = Response(response="Error in the Party DB.", status=500, mimetype="text/html")
            return response

        collection_path = response.headers["location"] = "/respondents/" + str(new_respondent.id)
        etag = hashlib.sha1(collection_path).hexdigest()
        response.set_etag(etag)

        response.headers["id"] = "/respondents/" + str(new_respondent.id)
        return response, 201

    return jsonify({"message": "Please provide a valid Json object.",
                    "hint": "you may need to pass a content-type: application/json header"}), 400
Example #45
    def thumbnail(
        self, pk: int, digest: str, **kwargs: Dict[str, bool]
    ) -> WerkzeugResponse:
        """Get Chart thumbnail
        ---
        get:
          description: Compute or get already computed chart thumbnail from cache.
          parameters:
          - in: path
            schema:
              type: integer
            name: pk
          - in: path
            schema:
              type: string
            name: digest
          responses:
            200:
              description: Chart thumbnail image
              content:
                image/*:
                  schema:
                    type: string
                    format: binary
            302:
              description: Redirects to the current digest
            400:
              $ref: '#/components/responses/400'
            401:
              $ref: '#/components/responses/401'
            404:
              $ref: '#/components/responses/404'
            500:
              $ref: '#/components/responses/500'
        """
        chart = self.datamodel.get(pk, self._base_filters)
        if not chart:
            return self.response_404()

        url = get_url_path("Superset.slice", slice_id=chart.id, standalone="true")
        if kwargs["rison"].get("force", False):
            logger.info(
                "Triggering thumbnail compute (chart id: %s) ASYNC", str(chart.id)
            )
            cache_chart_thumbnail.delay(url, chart.digest, force=True)
            return self.response(202, message="OK Async")
        # fetch the chart screenshot using the current user and cache if set
        screenshot = ChartScreenshot(url, chart.digest).get_from_cache(
            cache=thumbnail_cache
        )
        # If not screenshot then send request to compute thumb to celery
        if not screenshot:
            logger.info(
                "Triggering thumbnail compute (chart id: %s) ASYNC", str(chart.id)
            )
            cache_chart_thumbnail.delay(url, chart.digest, force=True)
            return self.response(202, message="OK Async")
        # If digests don't match, redirect to the URL with the current digest
        if chart.digest != digest:
            return redirect(
                url_for(
                    f"{self.__class__.__name__}.thumbnail", pk=pk, digest=chart.digest
                )
            )
        return Response(
            FileWrapper(screenshot), mimetype="image/png", direct_passthrough=True
        )
Example #46
def set_enrolment_code_as_redeemed(enrolment_code, respondent_urn=None):
    """
    Mark an enrolment_code as redeemed by its iac.
    :param enrolment_code: String, respondent_urn: String
    :return: Http Response
    """

    if not respondent_urn:
        respondent_urn = request.args.get('respondentId')

    app.logger.info("set_enrolment_code_as_redeemed with enrolment_code: {}, respondent: {}"
                    .format(enrolment_code, respondent_urn))

    # First check that we have a valid JWT token; if we don't, send a 400 error with an authorisation failure
    if request.headers.get('authorization'):
        jwt_token = request.headers.get('authorization')
        if not validate_scope(jwt_token, 'ps.write'):
            res = Response(response="Invalid token/scope to access this Microservice Resource", status=400, mimetype="text/html")
            return res
    else:
        res = Response(response="Valid token/scope is required to access this Microservice Resource", status=400, mimetype="text/html")
        return res

    try:
        app.logger.debug("Querying DB in set_enrolment_code_as_redeemed")

        app.logger.debug("Querying DB with set_enrolment_code_as_redeemed:{}".format(enrolment_code))

        enrolment_codes = (db.session.query(EnrolmentCode)
                          .filter(EnrolmentCode.respondent_id == None)
                          .filter(EnrolmentCode.status == 'ACTIVE')
                          .filter(EnrolmentCode.iac == enrolment_code))

        existing_enrolment_code = [[enc.id, enc.business_id, enc.survey_id, enc.iac, enc.status]
                                    for enc in enrolment_codes]

        if not existing_enrolment_code:
            app.logger.info("Enrolment code not found for set_enrolment_code_as_redeemed")
            response = Response(response="Enrolment code not found", status=400, mimetype="text/html")
            return response

        respondents = (db.session.query(Respondent)
                       .filter(Respondent.party_id == respondent_urn))

        respondent_id = [[res.id]
                         for res in respondents]

        if not respondent_id:
            app.logger.info("Respondent not found for set_enrolment_code_as_redeemed")
            response = Response(response="Respondent not found", status=400, mimetype="text/html")
            return response

        new_enrolment_code = EnrolmentCode(id=existing_enrolment_code[0][0],
                                           respondent_id=respondent_id[0][0],
                                           business_id=existing_enrolment_code[0][1],
                                           survey_id=existing_enrolment_code[0][2],
                                           iac=existing_enrolment_code[0][3],
                                           status='REDEEMED')

        db.session.merge(new_enrolment_code)
        db.session.commit()

    except exc.OperationalError:
        app.logger.error("DB exception: {}".format(sys.exc_info()[0]))
        response = Response(response="Error in the Party DB.", status=500, mimetype="text/html")
        return response

    response = Response(response="Enrolment code redeemed", status=200, mimetype="text/html")
    return response
Example #47
File: webapp.py Project: zimshk/searx
def index():
    """Render index page.

    Supported outputs: html, json, csv, rss.
    """

    # output_format
    output_format = request.form.get('format', 'html')
    if output_format not in ['html', 'csv', 'json', 'rss']:
        output_format = 'html'

    # check if there is a query
    if request.form.get('q') is None:
        if output_format == 'html':
            return render('index.html', )
        else:
            return index_error(output_format, 'No query'), 400

    # search
    search_query = None
    raw_text_query = None
    result_container = None
    try:
        search_query, raw_text_query = get_search_query_from_webapp(
            request.preferences, request.form)
        # search = Search(search_query) #  without plugins
        search = SearchWithPlugins(search_query, request.user_plugins, request)

        result_container = search.search()

    except Exception as e:
        # log exception
        logger.exception('search error')

        # is it an invalid input parameter or something else ?
        if (issubclass(e.__class__, SearxParameterException)):
            return index_error(output_format, e.message), 400
        else:
            return index_error(output_format, gettext('search error')), 500

    # results
    results = result_container.get_ordered_results()
    number_of_results = result_container.results_number()
    if number_of_results < result_container.results_length():
        number_of_results = 0

    # check for an external bang
    if result_container.redirect_url:
        return redirect(result_container.redirect_url)

    # UI
    advanced_search = request.form.get('advanced_search', None)

    # Server-Timing header
    request.timings = result_container.get_timings()

    # output
    for result in results:
        if output_format == 'html':
            if 'content' in result and result['content']:
                result['content'] = highlight_content(
                    escape(result['content'][:1024]), search_query.query)
            if 'title' in result and result['title']:
                result['title'] = highlight_content(
                    escape(result['title'] or ''), search_query.query)
        else:
            if result.get('content'):
                result['content'] = html_to_text(result['content']).strip()
            # removing html content and whitespace duplications
            result['title'] = ' '.join(
                html_to_text(result['title']).strip().split())

        if 'url' in result:
            result['pretty_url'] = prettify_url(result['url'])

        # TODO, check if timezone is calculated right
        if 'publishedDate' in result:
            try:  # test if publishedDate >= 1900 (datetime module bug)
                result['pubdate'] = result['publishedDate'].strftime(
                    '%Y-%m-%d %H:%M:%S%z')
            except ValueError:
                result['publishedDate'] = None
            else:
                if result['publishedDate'].replace(
                        tzinfo=None) >= datetime.now() - timedelta(days=1):
                    timedifference = datetime.now(
                    ) - result['publishedDate'].replace(tzinfo=None)
                    minutes = int((timedifference.seconds / 60) % 60)
                    hours = int(timedifference.seconds / 60 / 60)
                    if hours == 0:
                        result['publishedDate'] = gettext(
                            '{minutes} minute(s) ago').format(minutes=minutes)
                    else:
                        result['publishedDate'] = gettext(
                            '{hours} hour(s), {minutes} minute(s) ago').format(
                                hours=hours, minutes=minutes)  # noqa
                else:
                    result['publishedDate'] = format_date(
                        result['publishedDate'])

    if output_format == 'json':
        return Response(
            json.dumps(
                {
                    'query': search_query.query,
                    'number_of_results': number_of_results,
                    'results': results,
                    'answers': list(result_container.answers),
                    'corrections': list(result_container.corrections),
                    'infoboxes': result_container.infoboxes,
                    'suggestions': list(result_container.suggestions),
                    'unresponsive_engines': __get_translated_errors(
                        result_container.unresponsive_engines),
                },
                default=lambda item: list(item)
                if isinstance(item, set) else item),
            mimetype='application/json')
    elif output_format == 'csv':
        csv = UnicodeWriter(StringIO())
        keys = ('title', 'url', 'content', 'host', 'engine', 'score', 'type')
        csv.writerow(keys)
        for row in results:
            row['host'] = row['parsed_url'].netloc
            row['type'] = 'result'
            csv.writerow([row.get(key, '') for key in keys])
        for a in result_container.answers:
            row = {'title': a, 'type': 'answer'}
            csv.writerow([row.get(key, '') for key in keys])
        for a in result_container.suggestions:
            row = {'title': a, 'type': 'suggestion'}
            csv.writerow([row.get(key, '') for key in keys])
        for a in result_container.corrections:
            row = {'title': a, 'type': 'correction'}
            csv.writerow([row.get(key, '') for key in keys])
        csv.stream.seek(0)
        response = Response(csv.stream.read(), mimetype='application/csv')
        cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(
            search_query.query)
        response.headers.add('Content-Disposition', cont_disp)
        return response

    elif output_format == 'rss':
        response_rss = render(
            'opensearch_response_rss.xml',
            results=results,
            answers=result_container.answers,
            corrections=result_container.corrections,
            suggestions=result_container.suggestions,
            q=request.form['q'],
            number_of_results=number_of_results,
            base_url=get_base_url(),
            override_theme='__common__',
        )
        return Response(response_rss, mimetype='text/xml')

    # HTML output format

    # suggestions: use RawTextQuery to get the suggestion URLs with the same bang
    suggestion_urls = [{
        'url': raw_text_query.changeSearchQuery(suggestion).getFullQuery(),
        'title': suggestion
    } for suggestion in result_container.suggestions]

    correction_urls = [{
        'url': raw_text_query.changeSearchQuery(correction).getFullQuery(),
        'title': correction
    } for correction in result_container.corrections]

    return render('results.html',
                  results=results,
                  q=request.form['q'],
                  selected_categories=search_query.categories,
                  pageno=search_query.pageno,
                  time_range=search_query.time_range,
                  number_of_results=format_decimal(number_of_results),
                  advanced_search=advanced_search,
                  suggestions=suggestion_urls,
                  answers=result_container.answers,
                  corrections=correction_urls,
                  infoboxes=result_container.infoboxes,
                  paging=result_container.paging,
                  unresponsive_engines=__get_translated_errors(
                      result_container.unresponsive_engines),
                  current_language=match_language(
                      search_query.lang,
                      LANGUAGE_CODES,
                      fallback=request.preferences.get_value("language")),
                  base_url=get_base_url(),
                  theme=get_current_theme_name(),
                  favicons=global_favicons[themes.index(
                      get_current_theme_name())],
                  timeout_limit=request.form.get('timeout_limit', None))
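
A minimal client sketch for the JSON branch of the view above, assuming a searx-style instance URL and a 'format' form field (only the 'q' field is actually shown being read in the snippet):

import requests

def search_json(base_url, query):
    # The view reads the query from request.form['q']; 'format' is assumed to
    # select the JSON output branch.
    resp = requests.post(base_url + '/search',
                         data={'q': query, 'format': 'json'},
                         timeout=10)
    resp.raise_for_status()
    payload = resp.json()
    return payload.get('results', []), payload.get('number_of_results', 0)
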
Example #48
0
def video_feed():
    # from camera import Camera
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
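
The gen() generator called above is not shown; a hypothetical sketch of the usual multipart-frame generator it is assumed to be (Camera and get_frame() are placeholders, not part of the original code):

def gen(camera=None):
    # Camera / get_frame() are hypothetical; get_frame() is expected to return
    # one JPEG-encoded frame as bytes.
    if camera is None:
        camera = Camera()
    while True:
        frame = camera.get_frame()
        # Each yielded chunk is one part of the multipart/x-mixed-replace
        # response, delimited by the 'frame' boundary set in the mimetype.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
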
Example #49
0
def print_it():
    """
    POST:
    {
        action: 'print' | 'stop' | 'pause' | 'resume' | 'percentage' | 'unfinished'
        cd: String, (only if action is 'print')
        line: Number (Optional)
    }

    default response:
    {
        status: 'success' | 'failure'
        percentage: Number
    }
    unfinished response:
    {
        status: 'success' | 'failure'
        unfinished: {
            exist: True | False,
            (if exist:)
            cd: String,
            line: Number
        }
    }

    DELETE:
    {}{}

    """
    if request.method == 'POST':
        try:
            req = request.json
            action = req['action']
            # status = {'status': 'success', 'status_code': 200}
            percentage = 0

            if action == 'print':
                ''' refresh the ext board buffer to be able to get the filament error '''
                # printer.ext_board_flush_input_buffer()
                # printer.ext_board_off_A_flag()
                ''' delete the last print's backup files '''
                printer.delete_last_print_files()
                # try:
                gcode_file_address = req['cd']
                if printer.base_path in gcode_file_address:
                    gcode_file_address = gcode_file_address[len(
                        printer.base_path)+1:]
                if 'line' in req:
                    printer.start_printing_thread(
                        gcode_dir=gcode_file_address, line=req['line'])
                else:
                    printer.start_printing_thread(gcode_dir=gcode_file_address)
                # except Exception as e:
                #     print('ERROR:', e)
                #     status = {'status': str(e), 'status_code': 500}
            elif action == 'stop':
                printer.stop_printing()
                printer.delete_last_print_files()
                ''' wait until the buffer has been freed '''
                time.sleep(1)
                printer.release_motors()
                printer.cooldown_hotend()
                printer.cooldown_bed()
                printer.stop_move_up()
            elif action == 'resume':
                printer.resume_printing()
            elif action == 'pause':
                printer.pause_printing()
            elif action == 'percentage':
                percentage = printer.get_percentage()
            elif action == 'unfinished':
                cfup = printer.check_for_unfinished_print()
                if cfup[0]:
                    return jsonify({
                        'status': 'success',
                        'unfinished': {
                            'exist': cfup[0],
                            'cd': cfup[1][0],
                            'line': cfup[1][1]
                        }
                    }), 200
                else:
                    return jsonify({'status': 'success', 'unfinished': {'exist': False, 'cd': ''}}), 200
            else:
                raise ValueError('unknown action: %s' % action)

            return jsonify({'status': 'success', 'percentage': percentage}), 200
        except Exception as e:
            log.error('ERROR in printing: %s' % e)
            # print('ERROR in printing: ', e)
            return Response(status=500)

    elif request.method == 'DELETE':
        try:
            printer.delete_last_print_files()
            return Response(status=200)
        except Exception as e:
            log.error('ERROR in deleting file (maybe not bad) %s' % e)
            return Response(status=500)
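
A small client sketch for the POST contract documented in the docstring above; the base URL and route are assumptions, not taken from the original code:

import requests

PRINTER_URL = 'http://localhost:5000/print'  # hypothetical route for print_it()

def start_print(gcode_path, line=None):
    body = {'action': 'print', 'cd': gcode_path}
    if line is not None:
        body['line'] = line  # resume from a specific G-code line
    return requests.post(PRINTER_URL, json=body, timeout=5).json()

def print_percentage():
    # Expected response: {'status': 'success', 'percentage': <number>}
    return requests.post(PRINTER_URL, json={'action': 'percentage'},
                         timeout=5).json()
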
Example #50
0
    def predict(model_name: str) -> Response:  # pylint: disable=unused-variable
        """make a prediction using the specified model and return the results"""
        if request.method == "OPTIONS":
            return Response(response="", status=200)

        # Record to the database unless ?record=false is passed
        record_to_database = request.args.get("record",
                                              "true").lower() != "false"

        # Use the prediction cache unless ?cache=false is passed
        use_cache = request.args.get("cache", "true").lower() != "false"

        model = app.predictors.get(model_name.lower())
        if model is None:
            raise ServerError("unknown model: {}".format(model_name),
                              status_code=400)

        data = request.get_json()

        log_blob = {
            "model": model_name,
            "inputs": data,
            "cached": False,
            "outputs": {}
        }

        # Record the number of cache hits before we hit the cache so we can tell whether we hit or not.
        # In theory this could result in false positives.
        pre_hits = _caching_prediction.cache_info().hits  # pylint: disable=no-value-for-parameter

        if use_cache and cache_size > 0:
            # lru_cache insists that all function arguments be hashable,
            # so unfortunately we have to stringify the data.
            prediction = _caching_prediction(model, json.dumps(data))
        else:
            # if cache_size is 0, skip caching altogether
            prediction = model.predict_json(data)

        post_hits = _caching_prediction.cache_info().hits  # pylint: disable=no-value-for-parameter

        if record_to_database and demo_db is not None:
            try:
                perma_id = None
                perma_id = demo_db.add_result(headers=dict(request.headers),
                                              model_name=model_name,
                                              inputs=data,
                                              outputs=prediction)
                if perma_id is not None:
                    slug = int_to_slug(perma_id)
                    prediction["slug"] = slug
                    log_blob["slug"] = slug

            except Exception:  # pylint: disable=broad-except
                # TODO(joelgrus): catch more specific errors
                logger.exception("Unable to add result to database",
                                 exc_info=True)

        if use_cache and post_hits > pre_hits:
            # Cache hit, so insert an artificial pause
            log_blob["cached"] = True
            time.sleep(0.25)

        # The model predictions are extremely verbose, so we only log the most human-readable
        # parts of them.
        if model_name == "machine-comprehension":
            log_blob["outputs"]["best_span_str"] = prediction["best_span_str"]
        elif model_name == "coreference-resolution":
            log_blob["outputs"]["clusters"] = prediction["clusters"]
            log_blob["outputs"]["document"] = prediction["document"]
        elif model_name == "textual-entailment":
            log_blob["outputs"]["label_probs"] = prediction["label_probs"]
        elif model_name == "named-entity-recognition":
            log_blob["outputs"]["tags"] = prediction["tags"]
        elif model_name == "semantic-role-labeling":
            verbs = []
            for verb in prediction["verbs"]:
                # Don't want to log boring verbs with no semantic parses.
                good_tags = [tag for tag in verb["tags"] if tag != "O"]  # "O" = no role
                if len(good_tags) > 1:
                    verbs.append({
                        "verb": verb["verb"],
                        "description": verb["description"]
                    })
            log_blob["outputs"]["verbs"] = verbs

        elif model_name == "constituency-parsing":
            log_blob["outputs"]["trees"] = prediction["trees"]

        logger.info("prediction: %s", json.dumps(log_blob))

        print(log_blob)

        return jsonify(prediction)
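
_caching_prediction is referenced but not defined in this snippet; presumably it is an lru_cache-wrapped helper keyed on the stringified JSON, roughly like this sketch (the cache_size default and exact signature are assumptions):

from functools import lru_cache
import json

cache_size = 128  # assumed default; the real value would come from server config

@lru_cache(maxsize=cache_size)
def _caching_prediction(model, data_str: str):
    # lru_cache needs hashable arguments, hence the JSON string; the view above
    # calls _caching_prediction(model, json.dumps(data)).
    return model.predict_json(json.loads(data_str))
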
Example #51
0
def authenticate():
    return Response(
        'Unauthorized', 401,
        {'WWW-Authenticate': 'Basic realm="Login Required"'})
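
authenticate() is typically paired with a decorator that inspects request.authorization and falls back to the 401 response above; a minimal sketch of that common pattern (check_credentials() is a hypothetical placeholder):

from functools import wraps
from flask import request

def requires_auth(view):
    @wraps(view)
    def wrapper(*args, **kwargs):
        auth = request.authorization
        if not auth or not check_credentials(auth.username, auth.password):
            return authenticate()  # 401 plus WWW-Authenticate header
        return view(*args, **kwargs)
    return wrapper
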
Example #52
0
    def get(self):
        """
        Get all exchanges.

        INPUT parameters:
          None

        RESPONSE ENTITY BODY:
        * Media type: Mason
          https://github.com/JornWildt/Mason
         * Profile: UPDOWN_User
          /profiles/user_profile

        """
        from_currency = 1
        to_currency = 1
        date_from = 0
        date_to = 0
        # extracting URL query parameters
        try:
            if request.args.get('from_currency'):
                from_currency = int(request.args['from_currency'])

            if request.args.get('to_currency'):
                to_currency = int(request.args['to_currency'])

            if request.args.get('date_from'):
                date_from = int(request.args['date_from'])

            if request.args.get('date_to'):
                date_to = int(request.args['date_to'])

        except Exception:
            print("An error occurred; try again or contact the admin")
		
        # Extract exchanges from the database
        exchanges_db = g.con.get_exchange(from_currency, to_currency,
                                          date_from, date_to)

        envelope = UpdownObject()
        #envelope.add_namespace("updown", LINK_RELATIONS_URL)

        #envelope.add_control("self", href=api.url_for(Users))
        items = envelope["items"] = []

        print(exchanges_db)

        for element in exchanges_db:
            item = UpdownObject(
                 entry_id=element['entry_id'],
                 from_currency=element['from_currency'],
                 to_currency=element['to_currency'],
                 exact_date=element['exact_date'],
                 exchange_rate=element['exchange_rate']
                )
            exchange_id=element["entry_id"]
            #item.add_control("self", href=api.url_for(Users, userid=user['user_id']))
            items.append(item)
        
        '''
        MASON = "application/vnd.mason+json"
        JSON = "application/json"
        FORUM_USER_PROFILE = "/profiles/user-profile/"
        FORUM_CURRENCY_PROFILE = "/profiles/currency-profile/"
        FORUM_CHOICE_PROFILE = "/profiles/choice-profile/"
        FORUM_EXCHANGE_PROFILE = "/profiles/exchange-profile/"
        ERROR_PROFILE = "/profiles/error-profile"
		'''			

        #RENDER
        return Response(json.dumps(envelope), 200, mimetype=MASON+";"+UPDOWN_CURRENCY_PROFILE)
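
A client-side sketch for the optional query parameters this handler reads (from_currency, to_currency, date_from, date_to); the route is an assumption:

import requests

resp = requests.get('http://localhost:5000/exchanges/',  # hypothetical route
                    params={'from_currency': 1, 'to_currency': 2,
                            'date_from': 0, 'date_to': 0})
for item in resp.json().get('items', []):
    print(item['exact_date'], item['exchange_rate'])
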
Example #53
0
def dynamic_update_random_edge():
    req_data = request.get_json()
    values = req_data['values']
    result = graphServices.dynamic_update_random_edge(values)

    return Response(json.dumps(result), mimetype='application/json')
Example #54
0
def handler():
    return Response(response=json.dumps({"data": "Hello World!"}),
                    content_type='application/json',
                    status=200)
Example #55
0
def run_algorithm_floyd_warshall():
    req_data = request.get_json()
    values = req_data['values']
    result = graphAlgorithms.run_algorithm_floyd_warshall(values)

    return Response(json.dumps(result), mimetype='application/json')
Example #56
0
            if Brother(row['Nickname']).read() is None:
                try:
                    bro.create()
                except Exception as e:
                    errors.append(str(e))
            else:
                try:
                    bro.update()
                except Exception as e:
                    errors.append(str(e))
        if errors == []:
            res = 'All rows imported successfully!'
        else:
            errors.insert(0, 'Errors found with this import:')
            res = '\n'.join(errors)
        res = Response(res)
        res.headers['Content-Type'] = 'text/html'
        return res
    else:
        abort(400)


@app.route('/dpl/export/', methods=['GET'])
def downloadCsv():
    si = StringIO()
    writer = csv.writer(si)
    writer.writerow(['Name', 'Nickname', 'Big', 'Year'])
    for row in getAllBrothers():
        writer.writerow([row[1], row[0], row[2], str(row[3])])
    response = make_response(si.getvalue())
    response.headers['Content-Disposition'] = 'attachment; filename=brothers.csv'
    response.headers['Content-Type'] = 'text/csv'
    return response
Example #57
0
def validate(data=None,
             schema_id=None,
             filepath=None,
             root=None,
             definition=None,
             specs=None,
             validation_function=None,
             validation_error_handler=None):
    """
    This method is available to use YAML swagger definitions file
    or specs (dict or object) to validate data against its jsonschema.

    example:
        validate({"item": 1}, 'item_schema', 'defs.yml', root=__file__)
        validate(request.json, 'User', specs={'definitions': {'User': ...}})

    :param data: data to validate, by default is request.json
    :param schema_id: The definition id to use to validate (from specs)
    :param filepath: definition filepath to load specs
    :param root: root folder (inferred if not provided), unused if path
        starts with `/`
    :param definition: Alias to schema_id (kept for backwards
        compatibility)
    :param specs: load definitions from dict or object passed here
        instead of a file.
    :param validation_function: custom validation function which takes
        the positional arguments: data to be validated at first and
        schema to validate against at second
    :param validation_error_handler: custom function to handle
        exceptions thrown when validating which takes the exception
        thrown as the first, the data being validated as the second and
        the schema being used to validate as the third argument
    """
    schema_id = schema_id or definition

    # for backwards compatibility with function signature
    if filepath is None and specs is None:
        abort(Response('Filepath or specs is needed to validate', status=500))

    should_validate_headers = False

    if data is None:
        should_validate_headers = True
        if request.method == 'GET':
            data = request.args.to_dict()
        else:
            data = request.json  # defaults
    elif callable(data):
        # data=lambda: request.json
        data = data()

    if not data:
        data = {}

    # not used anymore but kept to reuse with marshmallow
    endpoint = request.endpoint.lower().replace('.', '_')
    verb = request.method.lower()

    if filepath is not None:
        swag = yaml_loader.get(filepath, root)
    else:
        swag = copy.deepcopy(specs)

    params = [
        item for item in swag.get('parameters', []) if item.get('schema')
    ]

    raw_definitions = extract_definitions(params, endpoint=endpoint, verb=verb)

    if schema_id is None:
        schema_id = schema_id_for_request(params)

    if schema_id is None:
        # if it is still none use first raw_definition extracted
        if raw_definitions:
            schema_id = raw_definitions[0].get('id')

    main_def = schema_for_id(schema_id, swag, raw_definitions)

    # In a GET call, query params are strings; check the type inside the string.
    strict_validation = True
    if request.method == 'GET':
        strict_validation = False

    validate_data(data,
                  main_def,
                  validation_function=validation_function,
                  validation_error_handler=validation_error_handler,
                  strict_validation=strict_validation)

    # do not validate headers if data is not None
    if should_validate_headers:
        validate_headers(params, raw_definitions, validation_function,
                         validation_error_handler)
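
A minimal usage sketch of validate() inside a Flask view, following the docstring's own examples (item_schema / defs.yml); the route and status code are assumptions:

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/items', methods=['POST'])
def create_item():
    # Aborts with a validation error response if request.json does not match
    # the 'item_schema' definition loaded from defs.yml (docstring example).
    validate(request.json, 'item_schema', 'defs.yml', root=__file__)
    return jsonify(request.json), 201
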
Example #58
0
def run_algorithm_dijkstra_apsp():
    req_data = request.get_json()
    values = req_data['values']
    result = graphAlgorithms.run_algorithm_dijkstra_apsp(values)

    return Response(json.dumps(result), mimetype='application/json')
Example #59
0
def after_request(resp: Response) -> Response:
    if isinstance(resp.get_json(), dict):
        # use replace, not json.loads/dumps, for speed
        resp.data = resp.data.replace(
            b'{', bytes('{\n"request_id":"%s",' % get_request_id(), 'utf-8'), 1)
    return resp
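
This hook prepends a request id to every JSON response body. It still needs to be registered on the app, and get_request_id() must be defined; a sketch of that wiring (all names besides after_request are assumptions):

import uuid
from flask import Flask, g, request

app = Flask(__name__)

@app.before_request
def assign_request_id():
    # Reuse an incoming X-Request-Id header if present, otherwise mint one.
    g.request_id = request.headers.get('X-Request-Id', uuid.uuid4().hex)

def get_request_id():
    return g.get('request_id', '')

app.after_request(after_request)  # register the hook shown above
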
Example #60
0
def video_viewer():
    return Response(video_stream(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')