Code example #1
class BaseHandler(web.RequestHandler):

    log = app_log
    executor = futures.ThreadPoolExecutor(8)

    def set_default_headers(self):
        self.set_header("Content-Type", "application/json;charset:utf-8")
Code example #2
 def __init__(self, filename=None):
     self.filename = filename
     self.io_loop = ioloop.IOLoop.current()
     self.executor = futures.ThreadPoolExecutor(8)
     if self.filename and os.path.exists(filename):
         db = json.load(open(filename, 'r'))
         self.collections = db['collections']
     else:
         self.collections = {'maxid': 0}
Code example #3
File: test_paypal.py  Project: ZZITE/Study-Notes
 def setUpClass(cls):
     cls.paypal_client = PayPalClient(
         mode='sandbox',
         client_id='AduMrSVpBpufHUUZN_PNzRf5kBCSBxOKPlFYHDjSv9EbxPrRwXgMmbyaIpA1r5U2bixBi2XtZ7rtNPdU',
         client_secret='EMqbZ1SvnbGRGBMK-SvvEEW4oT9B_0BdOzerpLUKVUcqGSEqfc-edoBP7RwKnzuBlLr-whyDU2EpTTQH'
     )
     cls.executor = futures.ThreadPoolExecutor()
     cls.async_do = cls.executor.submit
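
`async_do` above is just an alias for `ThreadPoolExecutor.submit`, which returns a `concurrent.futures.Future`. A self-contained sketch of the same submit-then-result pattern (the worker function is an assumption, not PayPal code):

from concurrent import futures


def slow_call(n):
    # Placeholder for a blocking sandbox/API call.
    return n * n


executor = futures.ThreadPoolExecutor()
async_do = executor.submit

pending = [async_do(slow_call, i) for i in range(4)]
print([f.result(timeout=10) for f in pending])  # blocks until each Future resolves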
Code example #4
 def __init__(self, meta_data, branch='master'):
     self.io_loop = ioloop.IOLoop.current()
     self.executor = futures.ThreadPoolExecutor(8)
     self.meta = meta_data
     self.branch = branch
     self._users = set()
     self._id = self.meta['id']
     self._full_id = ProjectFactory._full_id(self.meta['id'], branch)
     self._repo = repo(os.path.join(conf.workdirs, self._id),
                       self.branch,
                       clone_url=self.meta['repo_url'],
                       branch=self.branch)
     self._wd = os.path.join(conf.workdirs, self._id, self.branch)
Code example #5
class UploadHandler(BaseHandler):
    executor = futures.ThreadPoolExecutor(100)

    @run_on_executor()
    def save_file(self,
                  fileobj,
                  base_dir,
                  filename=None,
                  user=None,
                  is_image=True):
        if not user:
            user = "******"

        upload_path = user + "/" + datetime.datetime.utcnow().strftime(
            "%Y%m%d") + "/"

        # Sanitize the path: strip directory traversal and leading slashes
        base_dir = base_dir.replace("../", "")
        base_dir = re.sub(r"^/+", "", base_dir)
        if not os.path.exists(base_dir + upload_path):
            os.makedirs(base_dir + upload_path)

        if not filename:
            uuidhex = uuid.uuid1().hex
            file_ext = os.path.splitext(fileobj["filename"])[1].lower()
            filename = uuidhex + file_ext

        if not os.path.exists(base_dir + upload_path + filename):
            with open(base_dir + upload_path + filename, "wb") as f:
                f.write(fileobj["body"])
            result = {
                "state": "SUCCESS",
                "url": upload_path + filename,
                "title": filename,
                "original": fileobj["filename"],
            }
            u4Ts.append_file(upload_path + filename, is_image=is_image)
            return result
            # self.write(result)
            # self.finish()

    @gen.coroutine
    def get(self):
        action = self.get_argument("action")
        if action == "config":
            self.write(ueditor_config)
            return

        elif action == u4Ts.config["imageManagerActionName"]:
            start = int(self.get_argument("start"))
            size = int(self.get_argument("size"))
            urls = u4Ts.get_list(start, size, is_image=True)
            result = {
                "state": "SUCCESS",
                "list": urls,
                "start": start,
                "total": len(urls),
            }
            # self.write(result)
            # self.finish()
            return result

        elif action == u4Ts.config["fileManagerActionName"]:
            start = int(self.get_argument("start"))
            size = int(self.get_argument("size"))
            urls = u4Ts.get_list(start, size, is_image=False)
            result = {
                "state": "SUCCESS",
                "list": urls,
                "start": start,
                "total": len(urls),
            }
            self.write(result)
            self.finish()
            return

        self.finish()

    @gen.coroutine
    def post(self):
        data = {}
        action = self.get_argument("action")
        if action == u4Ts.config["imageActionName"]:
            for keys in self.request.files:
                for fileobj in self.request.files[keys]:
                    data = yield self.save_file(
                        base_dir=u4Ts.config["imagePathFormat"],
                        fileobj=fileobj)

        elif action == u4Ts.config["scrawlActionName"]:
            # python2
            # fileobj = {'filename': 'scrawl.png', 'body': base64.decodestring(self.get_argument(u4Ts.config['scrawlFieldName']))}
            # python3
            fileobj = {
                "filename": "scrawl.png",
                "body": base64.decodebytes(
                    self.get_argument(u4Ts.config["scrawlFieldName"]).encode("utf-8")),
            }
            data = yield self.save_file(
                base_dir=u4Ts.config["scrawlPathFormat"], fileobj=fileobj)

        elif action == u4Ts.config["snapscreenActionName"]:
            for keys in self.request.files:
                for fileobj in self.request.files[keys]:
                    data = yield self.save_file(
                        base_dir=u4Ts.config["snapscreenPathFormat"],
                        fileobj=fileobj)

        elif action == u4Ts.config["videoActionName"]:
            for keys in self.request.files:
                for fileobj in self.request.files[keys]:
                    data = yield self.save_file(
                        base_dir=u4Ts.config["videoPathFormat"],
                        fileobj=fileobj)

        elif action == u4Ts.config["fileActionName"]:
            for keys in self.request.files:
                for fileobj in self.request.files[keys]:
                    data = yield self.save_file(
                        base_dir=u4Ts.config["filePathFormat"],
                        fileobj=fileobj,
                        is_image=False,
                    )
        self.set_header("Content-Type", "text/html")
        self.write(json.dumps(data))
        self.finish()
Code example #6
from tornado import web
from tornado.concurrent import futures

from sqlalchemy import create_engine

from ..libs.paginator import Paginator
from ..libs.utils import import_object
from ..middlewares import MiddlewareProcess
from ..models import Session as DBSession
from ..models import User
from ..models.base import redis_cli
from ..models.sys_config import SysConfig
from ..session import Session

thread_pool = futures.ThreadPoolExecutor()
process_pool = futures.ProcessPoolExecutor()


class BaseHandler(web.RequestHandler):
    """Handler的基类"""
    _db = None
    _redis_cli = None
    _session = None
    _cache_client = None
    thread_pool = thread_pool
    process_pool = process_pool

    @property
    def cache_client(self):
        if not self._cache_client:
Code example #7
 def __init__(self, api):
     self.io_loop = ioloop.IOLoop.current()
     self.executor = futures.ThreadPoolExecutor(8)
     self.api = api
     self.gitolite = Gitolite(conf.gitolite_wd,
                              self.repo_url('gitolite-admin'))
Code example #8
                print("CurrentBuffer:", frameBuffer)
                if len(frameBuffer) < 24:
                    continue
                loop.run_in_executor(
                    None, partial(self.wrappedDecode, frameBuffer, Q))

                status = Q.get()
                frameBuffer = b''
                if status == self.DECODE_SUC:
                    await stream.write(bytes([0x3e]))
                else:
                    await stream.write(bytes([0x6c]))

            except StreamClosedError:
                print("connection closed from {0:s}:{1:d}".format(
                    address[0], address[1]))
                break

            except gen.TimeoutError:
                frameBuffer = b''
                print("No response in 3 seconds {0:s}:{1:d}".format(
                    address[0], address[1]))


loop = IOLoop.current()  # type: IOLoop
loop.set_default_executor(futures.ThreadPoolExecutor(max_workers=10))
server = SSCServer()
print("server listening at 2334")
server.listen('2334')
loop.start()
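
Passing `None` as the executor makes `IOLoop.run_in_executor` fall back to the pool registered with `set_default_executor` (Tornado 5+). A stripped-down, self-contained sketch of that mechanism (the toy decode function is an assumption, not the SSCServer code):

from concurrent import futures
from functools import partial

from tornado.ioloop import IOLoop


def blocking_decode(data):
    # Stand-in for the CPU-bound frame decoding done on the pool.
    return data.upper()


async def main():
    loop = IOLoop.current()
    # None selects the default executor configured below.
    result = await loop.run_in_executor(None, partial(blocking_decode, b"frame"))
    print(result)


loop = IOLoop.current()
loop.set_default_executor(futures.ThreadPoolExecutor(max_workers=10))
loop.run_sync(main)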
Code example #9
 def _create_context(self):
     executors = {
         name: futures.ThreadPoolExecutor(threads)
         for name, threads in self.settings["threads"].items()
     }
     return self.context_class(self.settings, executors, **self.objects)
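
This builds one named thread pool per entry in `settings["threads"]`. A standalone sketch of the same per-name pool idea (the settings values and the submitted job are assumptions):

from concurrent import futures

settings = {"threads": {"db": 4, "images": 2}}

executors = {
    name: futures.ThreadPoolExecutor(threads)
    for name, threads in settings["threads"].items()
}

# Each subsystem submits to its own pool and gets a concurrent.futures.Future back.
future = executors["db"].submit(sum, range(1000))
print(future.result())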
Code example #10
import os
from pypi_server.handlers.base import BaseHandler
from tempfile import NamedTemporaryFile
from pypi_server.db import init_db, DB
from tornado.testing import AsyncHTTPTestCase
from pypi_server.server import create_app
from rest_client.async import RESTClient
from tornado.testing import gen_test
from tornado.httpclient import HTTPRequest, HTTPError
from tornado.concurrent import futures
from tornado.gen import Return, coroutine
import logging

BaseHandler.THREAD_POOL = futures.ThreadPoolExecutor(2)


class TestCase(AsyncHTTPTestCase):
    def setUp(self):
        super(TestCase, self).setUp()

        # Init DB
        self.__db_file = NamedTemporaryFile(mode="r+")

        logging.getLogger("peewee").setLevel(logging.WARNING)
        init_db("sqlite://{0}".format(self.__db_file.name))
        logging.getLogger("peewee").setLevel(logging.DEBUG)

    def get_app(self):
        return create_app(
            secret=os.urandom(32),
            io_loop=self.io_loop,
Code example #11
 def _create_context(self):
     executor = futures.ThreadPoolExecutor((os.cpu_count() or 1) * 5)
     return self.context_class(self.settings, executor, **self.objects)
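
The pool size `(os.cpu_count() or 1) * 5` is the same five-threads-per-core rule that `ThreadPoolExecutor` itself uses as the default `max_workers` on Python 3.5-3.7; a tiny sketch making that sizing explicit:

import os
from concurrent import futures

# Explicit version of the sizing rule used above: five worker threads per CPU core.
pool = futures.ThreadPoolExecutor((os.cpu_count() or 1) * 5)
print(pool.submit(os.cpu_count).result())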
Code example #12
class PayPalService(object):
    executor = futures.ThreadPoolExecutor()
    paypal_client = PayPalClient(
        mode=Config.PAYPAL_CONFIG['client_info']['mode'],
        client_id=Config.PAYPAL_CONFIG['client_info']['client_id'],
        client_secret=Config.PAYPAL_CONFIG['client_info']['client_secret']
    )

    @classmethod
    @run_on_executor
    def checkout(cls, size, quantity, total):
        """用户结账"""
        # 查找是否web profile对象已经创建
        web_profile_name = Config.PAYPAL_CONFIG['webprofile']
        web_profile_id = (cls.paypal_client
                             ._traverse_web_profile_id(web_profile_name))
        if web_profile_id is None:
            # Create a new web profile object
            web_profile_info = (cls.paypal_client
                                   ._web_profile_create(name=web_profile_name))
            if web_profile_info['error'] != 0:      # an error occurred
                return web_profile_info
            web_profile_obj = web_profile_info['obj']
            web_profile_id = web_profile_obj.id

        transaction_info = cls._get_transactions_info(size, quantity, total)
        payment_info = {
            "payer": {
                "payment_method": "paypal"
            },
            "redirect_urls": {
                "return_url": Config.PAYPAL_CONFIG['redirect_urls']
                                                  ['return_url'],
                "cancel_url": Config.PAYPAL_CONFIG['redirect_urls']
                                                  ['cancel_url']
            },
            "transactions": transaction_info,
            "experience_profile_id": web_profile_id
        }
        # Create the payment object
        payment_obj = cls.paypal_client._payment_create(**payment_info)
        # Send the payment object
        status = cls.paypal_client._payment_sent(payment_obj)
        if status['error'] != 0:    # an error occurred
            return status
        # Get the redirect URL
        redirect_url = cls.paypal_client._payment_redirect(payment_obj)
        status['redirect_url'] = redirect_url
        status['payment_id'] = payment_obj['id']
        return status

    @classmethod
    @run_on_executor
    def order_execute(cls, payment_id, payer_id):
        """这是一个支付执行的代理函数,在它的上面加上了@run_on_executor装饰器"""
        result = cls.paypal_client._payment_execute(payment_id=payment_id,
                                                    payer_id=payer_id)
        return result

    @classmethod
    def _get_transactions_info(cls, size, quantity, total):
        """生成交易信息"""
        transactions = [{
            "item_list": {
                "items": [{
                    "name": "legging " + str(size),
                    "sku": "LG001",
                    "price": "19.99",
                    "currency": "USD",
                    "quantity": quantity
                }]},
            "amount": {
                "total": str(total),
                "currency": "USD"
            },
            "description": "niceline legging"
        }]
        return transactions

    @classmethod
    def _get_total_price(cls, quantity, unit_price='19.99'):
        total_price = Decimal(unit_price) * quantity
        return total_price
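
Because `checkout` and `order_execute` are wrapped with `run_on_executor`, callers get back a Future that a Tornado coroutine can yield while the blocking PayPal SDK calls run on the class-level pool. A hypothetical caller, assuming the `PayPalService` class above is importable:

from tornado import gen, web


class CheckoutHandler(web.RequestHandler):

    @gen.coroutine
    def post(self):
        # checkout() returns a Future; yielding it keeps the IOLoop responsive.
        status = yield PayPalService.checkout(size='M', quantity=2, total='39.98')
        if status.get('error'):
            self.set_status(502)
        self.write(status)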
Code example #13
File: snippet.py  Project: someburner/GistsHub
 def __init__(self, loop=None):
     self.executor = futures.ThreadPoolExecutor(4)
     self.loop = loop or IOLoop.instance()
Code example #14
 def __init__(self, loop=None):
     self.executor = futures.ThreadPoolExecutor(60)
     self.loop = loop or tornado.ioloop.IOLoop.current()
Code example #15
File: rpc_server.py  Project: Sergei-vb/coralline-rpc
 def __init__(self, application, request, **kwargs):
     super().__init__(application, request, **kwargs)
     self.receiver = None
     self.executor = futures.ThreadPoolExecutor(max_workers=4)
Code example #16
File: S3Storage.py  Project: sweetpand/streamlit.io
class S3Storage(AbstractStorage):
    """Class to handle S3 uploads."""

    executor = futures.ThreadPoolExecutor(5)

    def __init__(self):
        """Constructor."""
        super(S3Storage, self).__init__()

        # For now don't enable verbose boto logs
        # TODO(armando): Make this configurable.
        log = logging.getLogger("botocore")
        log.propagate = False

        assert (config.get_option("global.sharingMode") !=
                "off"), 'Sharing is disabled. See "global.sharingMode".'

        self._bucketname = config.get_option("s3.bucket")
        self._url = config.get_option("s3.url")
        self._key_prefix = config.get_option("s3.keyPrefix")
        self._region = config.get_option("s3.region")

        user = os.getenv("USER", None)

        if self._url and "{USER}" in self._url:
            self._url = self._url.replace("{USER}", user)
        if self._key_prefix and "{USER}" in self._key_prefix:
            self._key_prefix = self._key_prefix.replace("{USER}", user)

        # URL where browsers go to load the Streamlit web app.
        self._web_app_url = None

        if not self._url:
            self._web_app_url = os.path.join(
                "https://%s.%s" % (self._bucketname, "s3.amazonaws.com"),
                self._s3_key("index.html"),
            )
        else:
            self._web_app_url = os.path.join(
                self._url, self._s3_key("index.html", add_prefix=False))

        aws_profile = config.get_option("s3.profile")
        access_key_id = config.get_option("s3.accessKeyId")
        secret_access_key = config.get_option("s3.secretAccessKey")

        if aws_profile is not None:
            LOGGER.debug('Using AWS profile "%s".', aws_profile)
            self._s3_client = boto3.Session(
                profile_name=aws_profile).client("s3")
        elif access_key_id is not None and secret_access_key is not None:
            self._s3_client = boto3.client(
                "s3",
                aws_access_key_id=access_key_id,
                aws_secret_access_key=secret_access_key,
            )
        else:
            LOGGER.debug("Using default AWS profile.")
            self._s3_client = boto3.client("s3")

    @run_on_executor
    def _get_static_upload_files(self):
        """Return a list of static files to upload.

        Returns an empty list if the files are already uploaded.
        """
        try:
            self._s3_client.head_object(Bucket=self._bucketname,
                                        Key=self._s3_key("index.html"))
            return []
        except botocore.exceptions.ClientError:
            return list(self._static_files)

    @run_on_executor
    def _bucket_exists(self):
        # THIS DOES NOT WORK because the aws exception isn't being
        # caught and disappearing.
        try:
            self._s3_client.head_bucket(Bucket=self._bucketname)
        except botocore.exceptions.ClientError as e:
            LOGGER.warning(
                '"%s" bucket not found. Do you have s3:HeadBucket permission?',
                self._bucketname,
            )
            LOGGER.warning(e)
            return False
        return True

    @run_on_executor
    def _create_bucket(self):
        LOGGER.debug('Attempting to create "%s" bucket', self._bucketname)
        self._s3_client.create_bucket(
            ACL="public-read",
            Bucket=self._bucketname,
            CreateBucketConfiguration={"LocationConstraint": self._region},
        )
        LOGGER.debug('"%s" bucket created', self._bucketname)

    @gen.coroutine
    def _s3_init(self):
        """Initialize s3 bucket."""
        try:
            bucket_exists = yield self._bucket_exists()
            if not bucket_exists:
                LOGGER.warning("Will attempt to create bucket")
                yield self._create_bucket()

        except botocore.exceptions.NoCredentialsError:
            LOGGER.error(
                'Please set "AWS_ACCESS_KEY_ID" and "AWS_SECRET_ACCESS_KEY" '
                "environment variables")
            raise errors.S3NoCredentials

    def _s3_key(self, relative_path, add_prefix=True):
        """Convert a local file path into an s3 key (ie path)."""
        key = os.path.join(self._release_hash, relative_path)
        if add_prefix:
            key = os.path.join(self._key_prefix, key)
        return os.path.normpath(key)

    @gen.coroutine
    def _save_report_files(self,
                           report_id,
                           files,
                           progress_coroutine=None,
                           manifest_save_order=None):
        """Save files related to a given report.

        See AbstractStorage for docs.
        """
        LOGGER.debug("Saving report %s", report_id)
        yield self._s3_init()
        static_files = yield self._get_static_upload_files()
        files_to_upload = static_files + files

        if manifest_save_order is not None:
            manifest_index = None
            manifest_tuple = None
            for i, file_tuple in enumerate(files_to_upload):
                if file_tuple[0] == "manifest.json":
                    manifest_index = i
                    manifest_tuple = file_tuple
                    break

            if manifest_tuple:
                files_to_upload.pop(manifest_index)

                if manifest_save_order == "first":
                    files_to_upload.insert(0, manifest_tuple)
                else:
                    files_to_upload.append(manifest_tuple)

        yield self._s3_upload_files(files_to_upload, progress_coroutine)

        raise gen.Return("%s?id=%s" % (self._web_app_url, report_id))

    @gen.coroutine
    def _s3_upload_files(self, files, progress_coroutine):
        set_private_acl = config.get_option("s3.requireLoginToView")
        for i, (path, data) in enumerate(files):
            mime_type = mimetypes.guess_type(path)[0]
            if not mime_type:
                mime_type = "application/octet-stream"
            if set_private_acl and path.startswith("report"):
                acl = "private"
            else:
                acl = "public-read"
            self._s3_client.put_object(
                Bucket=self._bucketname,
                Body=data,
                Key=self._s3_key(path),
                ContentType=mime_type,
                ACL=acl,
            )
            LOGGER.debug('Uploaded: "%s"', path)

            if progress_coroutine:
                yield progress_coroutine(math.ceil(100 * (i + 1) / len(files)))
            else:
                yield
Code example #17
File: utils.py  Project: jorviizheng/tornado-aws
class AsyncHTTPTestCase(testing.AsyncHTTPTestCase):

    executor = futures.ThreadPoolExecutor(10)

    def get_app(self):
        return web.Application([(r'/(.*)', RequestHandler)])
Code example #18
def run():
    options.parse_command_line()

    if options.config:
        options.parse_config_file(options.config)

    options.storage = os.path.abspath(options.storage)

    if os.getuid() == 0 and options.user:
        pw = pwd.getpwnam(options.user)
        uid, gid = pw.pw_uid, pw.pw_gid
        log.info("Changind user to %s [%s:%s]", options.user, uid, gid)
        os.setgid(uid)
        os.setuid(uid)

    try:
        if not all(f(options.storage) for f in (os.path.exists, os.path.isdir)):
            log.info('Creating new package storage directory: "%s"', options.storage)
            os.makedirs(options.storage)

        def on_interrupt(*args):
            log.warning("Receiving interrupt signal. Application will be stopped.")
            exit(errno.EINTR)

        log.debug("Preparing signal handling")
        for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
            signal.signal(sig, on_interrupt)

        def handle_pdb(sig, frame):
            import pdb
            pdb.Pdb().set_trace(frame)

        if options.debug:
            signal.signal(signal.SIGUSR2, handle_pdb)

        log.debug("Creating application instance")
        app = create_app(
            options.debug,
            options.secret,
            options.gzip,
        )

        log.debug("Creating IOLoop instance.")
        io_loop = IOLoop.current()

        io_loop.run_sync(lambda: init_db(options.database))

        if not (os.path.exists(options.cache_dir) and os.path.isdir(options.cache_dir)):
            os.makedirs(options.cache_dir)

        Cache.CACHE_DIR = options.cache_dir

        log.info("Init thread pool with %d threads", options.pool_size)
        handlers.base.BaseHandler.THREAD_POOL = futures.ThreadPoolExecutor(options.pool_size)

        AsyncHTTPClient.configure(None, max_clients=options.max_http_clients)

        proxy_url = URL(os.getenv('{0}_proxy'.format(options.pypi_server.scheme)))
        if proxy_url:
            log.debug("Configuring for proxy: %s", proxy_url)
            AsyncHTTPClient.configure(
                    'tornado.curl_httpclient.CurlAsyncHTTPClient',
                    defaults={
                        'proxy_host': proxy_url.host,
                        'proxy_port': proxy_url.port,
                        'proxy_username': proxy_url.user,
                        'proxy_password': proxy_url.password,
                        }
                    )

        PYPIClient.configure(
            options.pypi_server,
            handlers.base.BaseHandler.THREAD_POOL
        )

        if options.pypi_proxy:
            pypi_updater = PeriodicCallback(PYPIClient.packages, HOUR * 1000, io_loop)

            io_loop.add_callback(PYPIClient.packages)
            io_loop.add_callback(pypi_updater.start)

        log.info("Starting server http://%s:%d/", options.address, options.port)
        http_server = HTTPServer(app, xheaders=options.proxy_mode)
        http_server.listen(options.port, address=options.address)

        log.debug('Setting "%s" as storage', options.storage)
        PackageFile.set_storage(options.storage)

        log.debug("Starting main loop")
        io_loop.start()
    except Exception as e:
        log.fatal("Exception on main loop:")
        log.exception(e)
        exit(1)
    else:
        exit(0)
Code example #19
class UploadHandler(BaseHandler):
    executor = futures.ThreadPoolExecutor(100)

    @run_on_executor()
    def save_file(self, fileobj, base_dir, filename=None, user=None, is_image=True):
        if not user:
            user = '******'

        upload_path = user + '/' + datetime.datetime.utcnow().strftime('%Y%m%d') + '/'

        # Sanitize the path: strip directory traversal and leading slashes
        base_dir = base_dir.replace('../', '')
        base_dir = re.sub(r'^/+', '', base_dir)
        if not os.path.exists(base_dir + upload_path):
            os.makedirs(base_dir + upload_path)

        if not filename:
            uuidhex = uuid.uuid1().hex
            file_ext = os.path.splitext(fileobj['filename'])[1].lower()
            filename = uuidhex + file_ext

        if not os.path.exists(base_dir + upload_path + filename):
            with open(base_dir + upload_path + filename, 'wb') as f:
                f.write(fileobj['body'])
            result = {
                'state': 'SUCCESS',
                'url': upload_path + filename,
                'title': filename,
                'original': fileobj['filename'],
            }
            u4Ts.append_file(upload_path + filename, is_image=is_image)
            return result
            # self.write(result)
            # self.finish()

    @gen.coroutine
    def get(self):
        action = self.get_argument('action')
        if action == 'config':
            self.write(ueditor_config)
            return

        elif action == u4Ts.config['imageManagerActionName']:
            start = int(self.get_argument('start'))
            size = int(self.get_argument('size'))
            urls = u4Ts.get_list(start, size, is_image=True)
            result = {
                'state': 'SUCCESS',
                'list': urls,
                'start': start,
                'total': len(urls)
            }
            # self.write(result)
            # self.finish()
            return result

        elif action == u4Ts.config['fileManagerActionName']:
            start = int(self.get_argument('start'))
            size = int(self.get_argument('size'))
            urls = u4Ts.get_list(start, size, is_image=False)
            result = {
                'state': 'SUCCESS',
                'list': urls,
                'start': start,
                'total': len(urls)
            }
            self.write(result)
            self.finish()
            return

        self.finish()

    @gen.coroutine
    def post(self):
        data = {}
        action = self.get_argument('action')
        if action == u4Ts.config['imageActionName']:
            for keys in self.request.files:
                for fileobj in self.request.files[keys]:
                    data = yield self.save_file(base_dir=u4Ts.config['imagePathFormat'], fileobj=fileobj)

        elif action == u4Ts.config['scrawlActionName']:
            # python2
            # fileobj = {'filename': 'scrawl.png', 'body': base64.decodestring(self.get_argument(u4Ts.config['scrawlFieldName']))}
            # python3
            fileobj = {'filename': 'scrawl.png',
                       'body': base64.decodebytes(self.get_argument(u4Ts.config['scrawlFieldName']).encode('utf-8'))}
            data = yield self.save_file(base_dir=u4Ts.config['scrawlPathFormat'], fileobj=fileobj)

        elif action == u4Ts.config['snapscreenActionName']:
            for keys in self.request.files:
                for fileobj in self.request.files[keys]:
                    data = yield self.save_file(base_dir=u4Ts.config['snapscreenPathFormat'], fileobj=fileobj)

        elif action == u4Ts.config['videoActionName']:
            for keys in self.request.files:
                for fileobj in self.request.files[keys]:
                    data = yield self.save_file(base_dir=u4Ts.config['videoPathFormat'], fileobj=fileobj)

        elif action == u4Ts.config['fileActionName']:
            for keys in self.request.files:
                for fileobj in self.request.files[keys]:
                    data = yield self.save_file(base_dir=u4Ts.config['filePathFormat'], fileobj=fileobj, is_image=False)
        self.set_header("Content-Type", "text/html")
        self.write(json.dumps(data))
        self.finish()
Code example #20
File: upload.py  Project: solidworks1210/JustDemo
class UploadHandler(BaseHandler):
    '''Handle file uploads.'''

    executor = futures.ThreadPoolExecutor(2048)

    @authenticated
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        utype = self.get_argument('utype', 'normal').encode('UTF-8')
        post_data = {}
        for key in self.request.arguments:
            post_data[key] = self.get_arguments(key)
        if utype == 'excel':
            yield self.savexcel()
        elif utype == 'icloud':
            yield self.saveicloud()
        else:
            yield self.savenormal()

    @run_on_executor
    def savenormal(self):
        '''Save an image.'''
        width = int(self.get_argument('width', 500))
        height = int(self.get_argument('height', 500))
        quality = int(self.get_argument('quality', 80))

        # File parameters passed along by NGINX
        file_name = self.get_argument('last-calls_name', 'uploaded.file')
        file_content_type = self.get_argument('last-calls_content_type', "")
        # file_size = self.get_argument('file_size', -1)
        file_md5 = self.get_argument('last-calls_md5', '')
        file_path = self.get_argument('last-calls_path', '').encode('UTF-8')

        # Basic parameter validation
        if len(file_name) < 1 or len(file_content_type) < 1 or len(
                file_md5) < 6 or len(file_path) < 1:
            if len(file_path) > 1:
                os.remove(file_path)
            self.write({
                "state": state.API_FAIL[0],
                "msg": state.API_FAIL[1],
                "result": {}
            })
            return
        # Check the file type
        if not file_content_type or (file_content_type != "image/jpeg"
                                     and file_content_type != "image/png"):
            os.remove(file_path)
            self.write({
                "state": state.API_ERROR_FILE_FORMANT[0],
                "msg": state.API_ERROR_FILE_FORMANT[1],
                "result": {}
            })
            return
        tmp_file_path = hand_photo(file_md5, file_path, width, height, quality)

        if tmp_file_path:
            new_file_name = os.path.basename(tmp_file_path)
            ret_path = os.path.join("/static/upload/tmp", new_file_name)
            self.write({
                "state": state.WEB_SUCCESS[0],
                "msg": state.WEB_SUCCESS[1],
                "result": ret_path
            })

        else:
            self.write({
                "state": state.WEB_FAIL[0],
                "msg": state.WEB_FAIL[1],
                "result": {}
            })

    @run_on_executor
    def savexcel(self):
        '''Save an uploaded Excel file.'''

        # File parameters passed along by NGINX
        file_name = self.get_argument('last-calls_name', 'uploaded.file')
        file_content_type = self.get_argument('last-calls_content_type', "")
        # file_size = self.get_argument('file_size', -1)
        file_md5 = self.get_argument('last-calls_md5', '')
        file_path = self.get_argument('last-calls_path', '').encode('UTF-8')

        # Basic parameter validation
        if len(file_name) < 1 or len(file_content_type) < 1 or len(
                file_md5) < 6 or len(file_path) < 1:
            if len(file_path) > 1:
                os.remove(file_path)
            self.write({
                "state": state.WEB_FAIL_PARAMS[0],
                "msg": state.WEB_FAIL_PARAMS[1],
                "result": {}
            })
            return
        # Check the file type
        if not file_content_type or (
                file_content_type !=
                "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
                and file_content_type != "application/octet-stream"):
            os.remove(file_path)
            self.write({
                "state": state.WEB_ERROR_FILE_FORMANT[0],
                "msg": state.WEB_ERROR_FILE_FORMANT[1],
                "result": {}
            })
            return
        calls_tuples = excel.read_excel_tuple(file_path)
        try:
            os.remove(file_path)
        except IOError as ex:
            app_log.error(ex)
        if calls_tuples:

            timeStartIndex = -1
            calltimeIndex = -1
            callTypeIndex = -1
            phoneIndex = -1
            localAddIndex = -1
            desAddIndex = -1

            if calls_tuples[0].count("通话起始时间") > 0:
                timeStartIndex = calls_tuples[0].index("通话起始时间")
            if calls_tuples[0].count("通话时长") > 0:
                calltimeIndex = calls_tuples[0].index("通话时长")
            if calls_tuples[0].count("呼叫类型") > 0:
                callTypeIndex = calls_tuples[0].index("呼叫类型")
            if calls_tuples[0].count("对方号码") > 0:
                phoneIndex = calls_tuples[0].index("对方号码")
            if calls_tuples[0].count("本机通话地") > 0:
                localAddIndex = calls_tuples[0].index("本机通话地")
            if calls_tuples[0].count("对方归属地") > 0:
                desAddIndex = calls_tuples[0].index("对方归属地")

            if timeStartIndex < 0 or calltimeIndex < 0 or callTypeIndex < 0 or phoneIndex < 0 or localAddIndex < 0 or desAddIndex < 0:
                self.write({
                    "state": state.WEB_FAIL[0],
                    "msg": "格式错误",
                    "result": calls_tuples
                })
                return

            calls = []
            del calls_tuples[0]
            for row in calls_tuples:
                new_row = (row[timeStartIndex], row[calltimeIndex],
                           row[callTypeIndex], row[phoneIndex],
                           row[localAddIndex], row[desAddIndex])
                calls.append(new_row)

            self.write({
                "state": state.WEB_SUCCESS[0],
                "msg": state.WEB_SUCCESS[1],
                "result": calls
            })
        else:
            self.write({
                "state": state.WEB_FAIL[0],
                "msg": state.WEB_FAIL[1],
                "result": {}
            })

    @run_on_executor
    def saveicloud(self):
        '''Save iCloud data.'''

        # File parameters passed along by NGINX
        file_name = self.get_argument('last-calls_name', 'uploaded.file')
        file_content_type = self.get_argument('last-calls_content_type', "")
        # file_size = self.get_argument('file_size', -1)
        file_md5 = self.get_argument('last-calls_md5', '')
        file_path = self.get_argument('last-calls_path', '').encode('UTF-8')

        # Basic parameter validation
        if len(file_name) < 1 or len(file_content_type) < 1 or len(
                file_md5) < 6 or len(file_path) < 1:
            if len(file_path) > 1:
                os.remove(file_path)
            self.write({
                "state": state.WEB_FAIL_PARAMS[0],
                "msg": state.WEB_FAIL_PARAMS[1],
                "result": {}
            })
            return
        # Check the file type
        if not file_content_type or (
                file_content_type !=
                "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
                and file_content_type != "application/octet-stream"):
            os.remove(file_path)
            self.write({
                "state": state.WEB_ERROR_FILE_FORMANT[0],
                "msg": state.WEB_ERROR_FILE_FORMANT[1],
                "result": {}
            })
            return
        calls_tuples = excel.read_excel_tuple(file_path)
        try:
            os.remove(file_path)
        except IOError as ex:
            app_log.error(ex)
        if calls_tuples:
            del calls_tuples[0]

            self.write({
                "state": state.WEB_SUCCESS[0],
                "msg": state.WEB_SUCCESS[1],
                "result": calls_tuples
            })
        else:
            self.write({
                "state": state.WEB_FAIL[0],
                "msg": state.WEB_FAIL[1],
                "result": {}
            })