示例#1
0
文件: spup.py 项目: SonyMobile/spuppy
 def __init__(self, params):
     """
     Cache the parsed CLI options and build the run logger.

     :param params: options object exposing debug, subsite, directory,
         files and out attributes
     """
     self._debug = params.debug
     self._subsite = params.subsite
     self._path = params.directory
     self._files = params.files
     self._out = params.out
     # log under "logs/"; verbosity follows the debug flag
     self._logger = Logger("spuppy", "logs", params.debug)
示例#2
0
文件: upload.py 项目: zzn91/new_gd
def upload_confirm():
    """
    Confirm a finished upload.

    Path:
        /api/v1.0.0/upload/upload_confirm

    Request JSON:
        upload_type: requirement
        project_id: 100        # project id
        requirement_id: 8      # requirement id
        file_name: [name, name, ..., name]

    Returns:
        JSON response, e.g. {"code": 200, "msg": "..."}.
    """
    from config import Logger
    try:
        res = UploadController.confirm_upload(request.json)
    except Exception as e:
        # BUG FIX: `res` was unbound on this path, so the subsequent
        # `return res` raised NameError and masked the controller error.
        # Log the error and return an explicit failure payload instead.
        Logger.error(e)
        res = jsonify(code=500, msg="处理异常")
    return res
示例#3
0
 def __init__(self, connection_params: Config, logger: Logger):
     """
     Store connection parameters and logger, building defaults when
     None is passed, plus the fixed DNS record coordinates.
     """
     if connection_params is None:
         connection_params = Config()
     if logger is None:
         logger = Logger()
     self._connection_params = connection_params
     self._logger = logger
     # the single record this connector manages
     self._domain = 'esceer.com'
     self._dns_type = 'A'
     self._dns_record_name = '@'
示例#4
0
文件: upload.py 项目: zzn91/new_gd
def upload_page():
    """
    Paginated listing of uploaded files.

    Branching may depend on upload_type.

    Path:
        /api/v1.0.0/upload/uploads/page

    Request JSON:
        upload_type: project   # upload category
        project_id: 100        # project id
        requirement_id: 8      # requirement id
        page: 1
        per_page: 20

    Returns:
        JSON response, e.g. {"code": 200, "msg": "..."}.
    """
    from config import Logger
    try:
        res = UploadController.upload_page(request.json)
    except Exception as e:
        # BUG FIX: on failure `res` was never assigned, so `return res`
        # raised NameError and hid the original error.
        Logger.error(e)
        res = jsonify(code=500, msg="处理异常")
    return res
示例#5
0
def model_file_check():
    """
    Ensure the OCR image-model file exists locally, downloading it from
    the configured URL when it is missing.
    """
    if os.path.isfile(Config.IMAGE_MODEL_FILE):
        return
    Logger.info('正在下载模型文件...')
    with requests.get(Config.OCR.get('image_model_url'), stream=True) as r:
        # BUG FIX: abort on HTTP errors instead of silently writing an
        # error page into the model file.
        r.raise_for_status()
        with open(Config.IMAGE_MODEL_FILE, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
            Logger.info('下载成功\n')
示例#6
0
def model_file_check(_retries=3):
    """
    Ensure the OCR image-model file exists locally and passes md5
    verification, re-downloading it when the check fails.

    :param _retries: maximum number of re-download attempts on checksum
        failure (new parameter; the default keeps existing calls working)
    :raises InvalidModelException: when the downloaded file still fails
        verification after all retries
    """
    try:
        if os.path.isfile(Config.IMAGE_MODEL_FILE):
            model_hash_verify()
            return
        Logger.info('正在下载模型文件...')
        with requests.get(Config.OCR.get('image_model_url'), stream=True) as r:
            # abort on HTTP errors instead of writing an error page to disk
            r.raise_for_status()
            with open(Config.IMAGE_MODEL_FILE, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
                Logger.info('下载成功\n')
                model_hash_verify()
    except InvalidModelException:
        # BUG FIX: the original recursed unconditionally, looping forever
        # when the served file never matched its checksum.
        if _retries <= 0:
            raise
        return model_file_check(_retries - 1)
示例#7
0
    def get_coordinate(self, img_str):
        """
        Locate the click positions for a captcha image.

        :param img_str: raw image bytes of the captcha
        :return: list of 1-based glyph positions matching the recognised
                 title word(s), or '' when nothing was recognised or
                 decoding/prediction failed
        """
        # final result ('' means "not recognised")
        result = ''

        try:
            # decode and preprocess the captcha
            # FIX: np.fromstring is deprecated for binary input;
            # np.frombuffer is the drop-in replacement for bytes.
            img = cv2.imdecode(np.frombuffer(img_str, np.uint8),
                               cv2.IMREAD_COLOR)
            text = self.get_text(img)
            images = np.array(list(self._get_imgs(img)))
            images = self.preprocess_input(images)

            label = self.model.predict(text)
            label = label.argmax()
            text = self.texts[label]

            # recognised title word(s)
            titles = [text]

            position = []

            # Locate the second word: its x-offset depends on the
            # first word's character count.
            if len(text) == 1:
                offset = 27
            elif len(text) == 2:
                offset = 47
            else:
                offset = 60
            text2 = self.get_text(img, offset=offset)
            # crop not mostly white => a second title word exists
            if text2.mean() < 0.95:
                label = self.model.predict(text2)
                label = label.argmax()
                text2 = self.texts[label]
                titles.append(text2)

            labels = self.code_model.predict(images)
            labels = labels.argmax(axis=1)

            for pos, label in enumerate(labels):
                if self.texts[label] in titles:
                    position.append(pos + 1)

            # nothing recognised
            if len(position) == 0:
                return result
            result = position
            Logger.info('识别结果: %s' % result)
        except Exception:
            # FIX: narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt); failures still fall through
            # and return ''.
            pass
        return result
示例#8
0
文件: upload.py 项目: zzn91/new_gd
def upload_func():
    """
    File upload endpoint (multipart form-data).

    Path:
        /api/v1.0.0/upload/uploads

    Form-data params:
        upload_type: project | requirement | other
        project_id: 100
        requirement_id: 8
        files: binary file(s)
        file_type: png

    project/requirement uploads are handled by the local controller;
    any other upload_type is proxied to the remote API.

    Returns:
        JSON response, e.g. {"code": 200, "msg": "..."}.
    """
    # BUG FIX: `res` could be referenced before assignment when the proxy
    # branch raised; initialise a fallback response first.
    res = jsonify(code=500, msg="处理异常")
    try:
        req_dict = request.form.to_dict()
        if req_dict.get("upload_type") in ("project", "requirement"):
            res = UploadController.start_upload(request.files, request.form)
        else:
            headers = {'Referer': 'http://127.0.0.1:8008/15'}
            api_url = request.base_url.split(VERSION)[-1]
            base_url = BASE_API_URL + VERSION
            url = base_url + api_url
            # NOTE(review): as in the original, `files` is overwritten on
            # every pass, so only the LAST uploaded file is forwarded --
            # confirm this is intentional.
            files = {}
            try:
                for req_file in request.files.getlist("files"):
                    save_path = os.path.join('upload', req_file.filename)
                    req_file.save(save_path)
                    files = {'files': open(save_path, 'rb')}

                resp = requests.post(url=url,
                                     cookies=request.cookies,
                                     data=request.form,
                                     files=files,
                                     headers=headers)
            finally:
                # BUG FIX: the forwarded file handle was never closed.
                for fh in files.values():
                    fh.close()
            resp_data = json.loads(resp.content)
            return jsonify(resp_data)
    except Exception as e:
        Logger.error(e)
    return res
示例#9
0
def model_hash_verify():
    """
    Verify the model file on disk against the configured md5 digest.

    :return: None when no digest is configured (check skipped),
        True when the file matches
    :raises InvalidModelException: on mismatch; the corrupt file is
        removed first so it can be re-downloaded
    """
    expected_md5 = Config.OCR.get('image_model_md5')
    if not expected_md5:
        # no checksum configured -> skip verification
        return
    actual_md5 = file_md5(Config.IMAGE_MODEL_FILE)
    if actual_md5 == expected_md5:
        return True
    os.remove(Config.IMAGE_MODEL_FILE)
    Logger.error('模型文件校验失败,正在重新下载')
    raise InvalidModelException()
示例#10
0
 def __init__(self, base_path, sources):
     """
     Instantiate every configured data source.

     Each entry of `sources` maps a source name to a factory that is
     called with (per-source data directory, per-source alerting Logger).
     """
     self.sources = [
         sources[name](
             base_path / name,
             Logger(logging.getLogger(f"test_assistant_crawler.{name}"),
                    ALERT_FUNCTION, ALERT_SETTINGS))
         for name in sources
     ]
示例#11
0
class IpUtils:
    """Helpers for determining and parsing the host's external IP."""

    def __init__(self, config: Config, logger: Logger):
        # fall back to default instances when None is passed
        self._config = Config() if config is None else config
        self._logger = Logger() if logger is None else logger

    def get_external_ip(self) -> str:
        """Query the configured provider URL and return its response body."""
        external_ip = requests.get(self._config.get_ip_provider_url()).text
        self._logger.info('External ip address: %s' % external_ip)
        return external_ip

    @staticmethod
    def gather_ip_from_dns_response(dns_response_content: str) -> str:
        """
        Extract the IPv4 address from a DNS-record JSON response.

        :param dns_response_content: decoded response body containing a
            "data":"a.b.c.d" field
        :raises ValueError: when no such field is present
        """
        # BUG FIX: octets were matched with \d{1,4}, accepting invalid
        # addresses such as 1234.0.0.1 -- IPv4 octets have at most
        # 3 digits.
        match = re.search(r'\"data\":\"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\"',
                          dns_response_content)
        if match:
            return match.group(1)
        else:
            raise ValueError('Ip cannot be gathered from dns response: %s' %
                             dns_response_content)
示例#12
0
def _add_task_thread(url, headers, cookies, project_id, obj_info):
    """
    Background worker: push object info for a project to the remote
    task-creation endpoint, then persist the returned task ids.

    :param url: remote task-creation endpoint
    :param headers: forwarded HTTP headers
    :param cookies: forwarded auth cookies
    :param project_id: target project id
    :param obj_info: iterable of str(dict) entries carrying
        object_url / object_text
    :return: True when the remote call reports code 200 and all Task rows
        were created, False otherwise
    """
    import ast  # local import, matching this module's style

    with app.app_context():
        pos_obj_info = {}
        for _index, _info in enumerate(obj_info):
            # SECURITY FIX: parse str(dict) payloads with
            # ast.literal_eval instead of eval().
            info = ast.literal_eval(_info)
            info["object_url"] = ''
            info["object_text"] = ''
            pos_obj_info[_index] = str(info)

        data = {"project_id": project_id, "objects_info": pos_obj_info}
        res = requests.post(url=url,
                            headers=headers,
                            cookies=cookies,
                            json=data)
        # SECURITY FIX: the response was parsed with eval(); use the
        # requests JSON decoder instead (siblings in this project use
        # json.loads(res.text) for the same endpoints).
        # NOTE(review): this assumes the endpoint returns real JSON, not
        # a Python literal -- confirm against the remote service.
        content = res.json()
        code = content.get('code')
        if code == 200:
            data = content.get('data')
            for task_id, obj_url in zip(data, obj_info):
                obj_url = ast.literal_eval(obj_url)
                Task.create(
                    **{
                        "task_id": task_id,
                        "object_url": obj_url.get("object_url", "")
                    })
            return True
        else:
            Logger.error("存在上传失败的数据段.")
            return False
示例#13
0
class GoDaddyConnector:
    """Thin client for the GoDaddy DNS records API (single A record)."""

    def __init__(self, connection_params: Config, logger: Logger):
        """Store params/logger (defaults when None) and record coordinates."""
        if connection_params is None:
            connection_params = Config()
        if logger is None:
            logger = Logger()
        self._connection_params = connection_params
        self._logger = logger
        # the single record this connector manages
        self._domain = 'esceer.com'
        self._dns_type = 'A'
        self._dns_record_name = '@'

    def fetch_ip_from_dns(self) -> str:
        """Return the IP currently stored in the DNS A record."""
        self._logger.debug('Fetching current ip set in dns...')
        resp = requests.get(self._get_url(), headers=self._get_headers())
        self._logger.debug(resp.content)
        decoded = resp.content.decode('utf-8')
        return IpUtils.gather_ip_from_dns_response(decoded)

    def update_dns(self, target_ip: str) -> str:
        """PUT the new record value; return the raw response body."""
        self._logger.debug('Updating dns information...')
        resp = requests.put(self._get_url(),
                            data=self._build_new_dns_info(target_ip),
                            headers=self._get_headers())
        self._logger.debug(resp.content)
        return resp.content

    def _get_url(self) -> str:
        """Compose the records endpoint for this domain/type/name."""
        return '%s/v1/domains/%s/records/%s/%s' % (
            self._connection_params.get_godaddy_url_base(), self._domain,
            self._dns_type, self._dns_record_name)

    def _get_headers(self):
        """Auth and content-type headers required by the API."""
        api_key = self._connection_params.get_api_key()
        api_secret = self._connection_params.get_api_secret()
        return {
            'Authorization': 'sso-key %s:%s' % (api_key, api_secret),
            'accept': 'application/json',
            'Content-Type': 'application/json',
        }

    def _build_new_dns_info(self, target_ip: str):
        """Serialize the record payload expected by the PUT endpoint."""
        return '[{ "data": "%s", "ttl": 3600 }]' % target_ip
示例#14
0
def uploaded_check(uploaded_files, req_form):
    """
    Validate and stage uploaded files.

    Pipeline per file: name check -> extension check -> duplicate-name
    check -> duplicate-content (md5) check. Accepted images are pushed
    to the CDN and indexed in redis; accepted documents are run through
    check_file_format(). Returns a JSON summary with success / failure /
    repeat counts plus the first page of staged objects.

    :param uploaded_files: iterable of uploaded file objects exposing
        ``.filename`` (consumed by the ``save_ret_*_md5`` helpers)
    :param req_form: form object whose ``to_dict()`` yields upload_type,
        project_id and requirement_id
    :return: the raw legality-check response when the remote check
        fails, otherwise ``jsonify(code=200, ...)`` with the summary
    """

    req_dict = req_form.to_dict()
    up_type = req_dict.get("upload_type")
    project_id = req_dict.get("project_id")
    requirement_id = req_dict.get("requirement_id")

    # Check whether the requirement status allows uploading
    # task = PlatformTask.get_instance(requirement_id)
    # status_list = LocalStatus.query.filter_by(category="task").all()
    # accept_status = [item.id for item in status_list if item.en_name in ("active", "nonactivated")]
    #
    # if requirement_id:
    #     if not task:
    #         return jsonify(code=PARAMS_NOT_PROVIDED, msg="无效的需求id")
    #     if task.status not in accept_status:
    #         return jsonify(code=DATA_NOT_FOUND, msg="需求状态, 不允许上传文件")
    #     if task.creator_id != current_user.id:
    #         return jsonify(code=OPERATOR_ERROR, msg="操作非法")
    #     local_status = LocalStatus.query.filter_by(category='task', name='delete').first()
    #     if task.status == local_status:
    #         return jsonify(code=OPERATOR_ERROR, msg="需求已删除")
    #
    # if project_id:
    #     project = Project.get_instance(project_id)
    #     if not project:
    #         return jsonify(code=PARAMS_NOT_PROVIDED, msg="无效的参数")
    #     if current_user.platform != project.platform:
    #         return jsonify(code=OPERATOR_ERROR, msg="非法操作")

    req_dict["check_type"] = "uploaded_check"
    # res = requests.post("check", json=req_dict)

    # File-cache isolation level: one project/requirement, per upload.
    # Legality check delegated to the remote upload/check_legal API.
    from flask import request
    from config.config import VERSION, BASE_API_URL
    headers = get_headers()
    # api_url = request.base_url.split(VERSION)[-1]
    base_url = BASE_API_URL + VERSION
    url = os.path.join(base_url, 'upload/check_legal')
    res = requests.post(url=url,
                        cookies=request.cookies,
                        json=req_dict,
                        headers=headers)
    print('-----------------------------------')
    print(res.text, type(res.text))
    print('-----------------------------------')
    res_json = json.loads(res.text)
    if res_json.get("code") != 200:
        return res

    # redis keys scoped to this upload target
    upload_tmp_file = UPLOAD_TMP_FILE.format(up_type=up_type,
                                             id=(project_id or requirement_id))
    upload_tmp_url = UPLOAD_TMP_URLS.format(up_type=up_type,
                                            id=(project_id or requirement_id))
    upload_tmp_url_index = UPLOAD_TMP_URLS_INDEX.format(up_type=up_type,
                                                        id=(project_id
                                                            or requirement_id))

    # Temporary local storage location for the files
    local_tmp_path = get_local_tmp_path(up_type, project_id, requirement_id)
    # Allowed file format buckets
    img_type, file_type, compress_type, _ = get_upload_file_type()
    # Success / failure / repeat counters
    success_file_num, failure_file_num, repeat_file_num = 0, 0, 0
    error_content = []
    repeat_content = []
    for f in uploaded_files:
        file_name = f.filename
        _name, _type = os.path.splitext(file_name)
        ext = _type.strip('.').lower()

        # Name check
        is_legal, res_info = check_name(_name)
        if not is_legal:
            error_content.append({"file": file_name, "msg": '文件名称不合法'})
            Logger.error("%s 文件名称不合法" % file_name)
            failure_file_num += 1
            continue
        # Extension check
        if ext not in (img_type + file_type + compress_type):
            error_content.append({"file": file_name, "msg": '文件后缀不合规'})
            Logger.error("%s 文件后缀不合规" % file_name)
            failure_file_num += 1
            continue

        file_info = xredis.hgetall(upload_tmp_file)
        # Duplicate-name check
        if file_name in list(file_info.keys()):
            error_content.append({"file": file_name, "msg": '文件名重复'})
            Logger.error("%s 文件已存在, 文件名称重复" % file_name)
            repeat_file_num += 1
            continue

        # Local save path.
        save_path = os.path.join(local_tmp_path, f.filename)
        # Save the file and compute its md5 to detect duplicate content.
        # NOTE(review): a compress_type extension passes the check above
        # but matches neither branch below, leaving md5_code unbound
        # (NameError on the membership test that follows) -- confirm
        # whether archives are ever expected to reach this point.
        if ext in img_type:
            md5_code = save_ret_img_md5(f, save_path)
        if ext in file_type:
            md5_code = save_ret_file_md5(f, save_path)

        if md5_code not in list(file_info.values()):
            xredis.hset(upload_tmp_file, file_name, md5_code)
        else:
            os.remove(save_path)
            error_content.append({"file": file_name, "msg": '文件已存在, 内容相同'})
            Logger.error("%s 文件已存在, 内容相同" % file_name)
            repeat_file_num += 1
            continue

        # Image validation
        if ext in img_type:
            res = check_img_format(save_path)
            if res is False:
                os.remove(save_path)
                xredis.hdel(upload_tmp_file, file_name)
                Logger.error("%s 图片过大不合法" % file_name)
                failure_file_num += 1
                continue
            id_ = requirement_id or project_id
            cdn_path = get_cdn_path(req_dict={
                "upload_type": up_type,
                "id": id_
            },
                                    filename=file_name)
            with open(save_path, 'rb') as pic_f:
                upload_img(pic_f, cdn_path)
                cdn_info = {'object_url': cdn_path, 'object_type': "img"}
                xredis.hset(upload_tmp_url, cdn_path, str(cdn_info))
                l_list = {
                    "object_type": "img",
                    "object_url": cdn_path,
                    "object_text": "",
                    "object_key": cdn_path
                }
                xredis.lpush(upload_tmp_url_index, str(l_list))

        # Document validation
        if ext in file_type:
            res, _, error_urls, repeat_urls = check_file_format(
                save_path, upload_tmp_url, upload_tmp_url_index)
            if res is True:
                error_content.extend(error_urls)
                repeat_content.extend(repeat_urls)
                success_file_num += 1
            else:
                error_content.extend(error_urls)
                repeat_content.extend(repeat_urls)
                failure_file_num += 1
                xredis.hdel(upload_tmp_file, file_name)

    file_list = xredis.hgetall(upload_tmp_file)
    object_info = xredis.lrange(upload_tmp_url_index, 0, 9)
    ret_info = []
    pos = 0
    for info in list(object_info):
        # NOTE(review): eval() on redis-stored str(dict) payloads; safe
        # only while this process is the sole writer of these keys.
        info = eval(info)
        object_type = info.get("object_type")
        if object_type in ("tracking", ):
            object_url = eval(info.get("object_url"))
            main = object_url.get("main", {})
            base_url = main.get("base_url", '')
            pic_1 = main.get("picture", [''])[0]
            object_url = os.path.join(base_url, pic_1)
            info["object_url"] = os.path.join(get_endpoint, object_url)
        else:
            object_url = info.get("object_url")
            if object_url and ("http" not in object_url):
                abs_url = "{0}/{1}".format(get_endpoint, object_url)
                info["object_url"] = abs_url
        info["id"] = pos
        pos += 1
        ret_info.append(info)
    total = xredis.llen(upload_tmp_url_index)
    data = {
        "file_list": list(file_list.keys()),
        "object_urls": ret_info,
        "error_content": error_content,
        "repeat_content": repeat_content,
        "success_file_num": success_file_num,
        "failure_file_num": failure_file_num,
        "repeat_file_num": repeat_file_num
    }
    # Cache expiry: one hour.
    xredis.expire(upload_tmp_url, 60 * 60)
    xredis.expire(upload_tmp_url_index, 60 * 60)
    xredis.expire(upload_tmp_file, 60 * 60)
    return jsonify(code=200,
                   msg="文件上传成功",
                   content={
                       "data": data,
                       "total": total
                   })
示例#15
0
# encoding=utf-8

import pymysql

from com.entity.Article import Article
from config import Logger

log = Logger('all.log', level='debug').logger


class ArticleDao:

    # 查找数据
    @staticmethod
    def select_by_article_id(article_id):
        db = pymysql.connect(host="localhost",
                             user="******",
                             password="******",
                             db="keqiao",
                             charset="utf8mb4")
        cursor = db.cursor()
        select_sql = "select sid, article_id, name, url, read_count, discuss_count, spread_count from keqiao_article where article_id = '{}' ".format(
            article_id)
        try:
            cursor.execute(select_sql)
            result = cursor.fetchone()
            if result is None:
                return None
            else:
                sid = result[0]
                article_id = result[1]
示例#16
0
def confirm_process(req_json):
    """
    Finalise a staged upload (the "confirm" step).

    Re-validates via the remote upload/check_legal API, then either
    renames the requirement tmp directory to its batch id
    (requirement flow) or publishes the cached object urls as a task and
    cleans up the caches (project flow).

    :param req_json: payload with upload_type, project_id,
        requirement_id and file_name (list of confirmed names)
    :return: flask jsonify response; the raw check response on failure
    """
    filename = req_json.get("file_name")

    upload_type = req_json.get("upload_type")
    project_id = req_json.get('project_id')
    requirement_id = req_json.get('requirement_id')
    # if upload_type == "requirement":
    #     obj = PlatformTask.get_instance(id_=requirement_id)
    #     if not obj:
    #         return jsonify(code=DATA_NOT_FOUND, msg="无效的需求id")
    #     if obj.creator_id != current_user.id:
    #         return jsonify(code=OPERATOR_ERROR, msg="非法操作")
    #
    # if upload_type == "project":
    #     obj = Project.get_instance(_id=project_id)
    #     if not obj:
    #         return jsonify(code=DATA_NOT_FOUND, msg="无效的项目id")
    #     if current_user.platform != obj.platform:
    #         return jsonify(code=OPERATOR_ERROR, msg="非法操作")

    # Legality check; redis keys are scoped per upload target.
    upload_tmp_file = UPLOAD_TMP_FILE.format(up_type=upload_type,
                                             id=(project_id or requirement_id))
    upload_tmp_url = UPLOAD_TMP_URLS.format(up_type=upload_type,
                                            id=(project_id or requirement_id))
    upload_tmp_url_index = UPLOAD_TMP_URLS_INDEX.format(up_type=upload_type,
                                                        id=(project_id
                                                            or requirement_id))

    req_json["check_type"] = "confirm_process"
    req_json["counts"] = xredis.llen(upload_tmp_url_index)
    # res = requests.post("check", json=req_json)
    # res_json = json.loads(res.text())
    # if res_json.get("code") != 200:
    #     return res
    from flask import request
    from config.config import VERSION, BASE_API_URL
    headers = get_headers()
    # api_url = request.base_url.split(VERSION)[-1]
    base_url = BASE_API_URL + VERSION
    url = os.path.join(base_url, 'upload/check_legal')
    res = requests.post(url=url,
                        cookies=request.cookies,
                        json=req_json,
                        headers=headers)
    print('-----------------------------------')
    print(res.text, type(res.text))
    print('-----------------------------------')
    res_json = json.loads(res.text)
    if res_json.get("code") != 200:
        return res

    local_path = get_local_tmp_path(upload_type, project_id, requirement_id)
    # Allowed type buckets (images / documents / archives)
    img_type, file_type, compress_type, _ = get_upload_file_type()
    # # 确认提交文件存在.
    # if not check_file_exist(local_path, filename):
    #    return jsonify(code=FILE_NOT_EXIST, msg='文件不存在')

    # # 删除不在指定名称列表内文件.
    # delete_not_exist_file(local_path, filename)
    base_dir = os.path.dirname(local_path)

    if upload_type == "requirement":
        # Create a batch and rename the tmp directory after it
        # batch = ImgBatch.create(**{"desc": req_json.get("desc", ""),
        #                            "status": BatchImgStatus.WAITING_PUBLISH.value,
        #                            "img_list": "",
        #                            "requirement_id": requirement_id}
        #                             )

        # Update the batch count
        # success_count = xredis.llen(upload_tmp_url_index)
        # batch.update(**{"counts": success_count})
        # Rename the tmp directory to the batch id issued by the check API
        batch_id = res_json.get("data").get("batch_id")
        os.rename(local_path, os.path.join(base_dir, str(batch_id)))
        xredis.delete(upload_tmp_file)
        xredis.delete(upload_tmp_url_index)
        upload_tmp_batch_urls = UPLOAD_TMP_BATCH_URLS.format(
            up_type=upload_type,
            id=(project_id or requirement_id),
            batch_id=batch_id)
        xredis.rename(upload_tmp_url, upload_tmp_batch_urls)

    if upload_type == "project":
        try:
            obj_urls = xredis.hgetall(upload_tmp_url)
            publish_task(project_id, obj_urls)
            xredis.delete(upload_tmp_file)
            xredis.delete(upload_tmp_url)
            xredis.delete(upload_tmp_url_index)
        except Exception as e:
            Logger.error("发布任务异常 %s" % e)
            return jsonify(code=1, msg="发布任务异常")

        # Keep documents; images and archives go away with the tmp dir.
        # NOTE(review): os.rename(local_path, ...) runs once per matching
        # document, so a second match raises because local_path was
        # already renamed -- confirm a single rename was intended.
        for name in filename:
            _type = name.split('.')[-1]
            if _type in file_type:
                os.rename(local_path,
                          os.path.join(base_dir, date2str(datetime.now())))

        if os.path.exists(local_path):
            shutil.rmtree(local_path)

    return jsonify(code=200, msg="确认成功")
示例#17
0
import json
import logging.config
from datetime import datetime
from traceback import format_exc as trace

from config import ALERT_FUNCTION, ALERT_SETTINGS, DB_INFO_ALERT_SETTINGS, MONGODB_CONNECTION, LOG_PATH
from config import LOG_CONFIG, Logger

# Apply the file-based logging configuration before building loggers.
logging.config.fileConfig(LOG_CONFIG)
# Alerting logger for collector errors.
errors_logger = Logger(logging.getLogger("test_assistant_crawler.collector"),
                       ALERT_FUNCTION, ALERT_SETTINGS)

# Separate logger (DB-info alert settings) for database-update messages.
db_update_logger = Logger(
    logging.getLogger("test_assistant_crawler.db_updater"), ALERT_FUNCTION,
    DB_INFO_ALERT_SETTINGS)


class Collector:
    def __init__(self, base_path, sources):
        """
        Instantiate every configured data source.

        Each entry of `sources` maps a source name to a factory that is
        called with (per-source data directory, per-source alerting
        Logger).
        """
        self.sources = [
            sources[name](
                base_path / name,
                Logger(logging.getLogger(f"test_assistant_crawler.{name}"),
                       ALERT_FUNCTION, ALERT_SETTINGS))
            for name in sources
        ]

    def load_all(self):

        for module in self.sources:
            db_update_logger.info(
                f'Выполняется загрузка данных из источника {module.__name__}',
示例#18
0
def publish_batch(req_json):
    """
    Publish an image batch.

    Delegates validation to the remote upload/check_legal API
    (check_type="publish_batch"), then publishes the batch's cached
    object urls as a task and drops the redis cache.

    :param req_json: request payload containing img_batch_id
    :return: flask jsonify response; the raw check response on failure
    """
    batch_id = req_json.get("img_batch_id")
    # batch_obj = ImgBatch.query.filter_by(id=batch_id).first()
    # if not batch_obj:
    #     return jsonify(code=PARAMS_NOT_PROVIDED, msg="无效的批次id")
    #
    # published_id = LocalStatus.query.filter_by(category="batch",
    #                                            en_name="published").first().id
    # if batch_obj.status == published_id:
    #     return jsonify(code=PARAMS_NOT_PROVIDED, msg="批次已发布")
    #
    # local_tmp_path = get_local_tmp_path("requirement", project_id=None,
    #                                      requirement_id=batch_obj.requirement_id)
    #
    # task = PlatformTask.get_instance(batch_obj.requirement_id)
    # if task.creator_id != current_user.id:
    #     return jsonify(code=OPERATOR_ERROR, msg="操作非法")
    #
    # active_status = LocalStatus.query.filter_by(category="task",
    #                                             name="active").first()
    # if task.status != active_status.id:
    #     return jsonify(code=PARAMS_NOT_PROVIDED, msg="需求未审核通过, 不允许发布")
    #
    # # 需求已发布项目, 更改批次状态为发布.
    # if batch_obj.project_id:
    #     batch_obj.update(**{"status": published_id})
    #     return jsonify(code=200, msg="更新状态成功")
    #
    # # 创建项目
    # protect_status = LocalStatus.query.filter_by(category="project",
    #                                              name="protect").first()
    # if not current_user.platform:
    #     platform = User.query.filter(User.id==current_user.id).first().platform
    # else:
    #     platform = current_user.platform
    # request.json["name"] = task.name  # 项目名称
    # request.json["status"] = protect_status.id  # 项目状态 默认保护
    # request.json["finish_time"] = date2str(datetime.now() + timedelta(7))
    # request.json["requirement_name"] = task.creator.username or task.creator.email
    # request.json["batch_id"] = batch_obj.id
    # request.json["temp_id"] = task.temp_id
    # request.json["platform"] = platform
    # request.json["demand_user_id"] = current_user.id
    #
    # del request.json["img_batch_id"]

    # Create the project
    # pro_obj = sql_create(Project, ProjectCreateForm)  # 同步执行.
    #
    # data = json.loads(pro_obj.data)
    # project_id = data["data"]["id"]
    #
    # active_status = LocalStatus.query.filter_by(category="batch",
    #                                             name="published").first()
    # batch_obj.update(**{"project_id": project_id, "status": active_status.id})
    from config.config import VERSION, BASE_API_URL
    headers = get_headers()
    base_url = BASE_API_URL + VERSION
    # NOTE(review): os.path.join for URL building breaks on Windows and
    # when a segment starts with '/'; kept as-is for consistency with
    # the sibling functions in this module.
    url = os.path.join(base_url, 'upload/check_legal')
    req_json["check_type"] = "publish_batch"
    res = requests.post(url, json=req_json, headers=headers)
    res_json = json.loads(res.text)
    if res_json.get("code") != 200:
        return res
    try:
        # the check API resolves the ids; batch_id keys the redis cache
        project_id = res_json.get("data").get("project_id")
        requirement_id = res_json.get("data").get("requirement_id")
        upload_tmp_batch_url = UPLOAD_TMP_BATCH_URLS.format(
            up_type="requirement", id=requirement_id, batch_id=batch_id)
        obj_urls = xredis.hgetall(upload_tmp_batch_url)
        publish_task(project_id, obj_urls)
        xredis.delete(upload_tmp_batch_url)
    except Exception as e:
        Logger.error("发布任务异常 %s" % e)
        return jsonify(code=1, msg="发布任务异常")
    return jsonify(code=200, msg="批次发布成功")
示例#19
0
 def __init__(self, config: Config, logger: Logger):
     """Store the config/logger, creating defaults when None is passed."""
     if config is None:
         config = Config()
     if logger is None:
         logger = Logger()
     self._config = config
     self._logger = logger
示例#20
0
文件: upload.py 项目: zzn91/new_gd
def delete_path():
    """
    Delete staged upload paths.

    Branching may depend on upload_type.

    Path:
        /api/v1.0.0/upload/uploads/delete_path

    Request JSON:
        upload_type: project   # upload category
        project_id: 100        # project id
        requirement_id: 8      # requirement id
        obj_urls: []

    Returns:
        JSON response, e.g. {"code": 200, "msg": "..."}.
    """
    from config import Logger
    try:
        res = UploadController.delete_path(request.json)
    except Exception as e:
        # BUG FIX: `res` was unbound when the controller raised, so
        # `return res` raised NameError and masked the real error.
        Logger.error(e)
        res = jsonify(code=500, msg="处理异常")
    return res


# # todo  保留.
# @upload_bp.route('/upload/upload_delete', methods=['POST'])
# @verify_perm(code='upload_delete_doc')
# def upload_delete():
#     """
#     ***上传(文档/logo)删除***
#
#     *** 路径 ***
#     :
#         path: "/api/v1.0.0/upload/upload_delete"
#
#     **参数 request.json**
#     :   {
#             path: "requirement_doc/2019/05/2275/状态码统计_1559202980.xls"
#             requirement_id: 100
#             platform_id:100
#         }
#     **返回值 **
#     :
#         {
#             "code": 200,
#             "msg": "处理中"
#         }
#     """
#     from config import  Logger
#     try:
#         res = UploadController.delete_cdn_path(request.json)
#     except Exception as e:
#         Logger.error(e)
#     return res
示例#21
0
import sys

from config import Config, Logger
from utils import IpUtils
from web_connector import GoDaddyConnector

if __name__ == '__main__':
    # Expect exactly one positional argument: the target environment.
    if len(sys.argv) != 2:
        print('Invalid arguments')
        print('Usage:')
        print('update_dns.py <dev|prod>')
        sys.exit(1)

    env_name = sys.argv[1]
    app_config = Config(env_name)
    app_logger = Logger(app_config)

    # Determine the host's current external address.
    current_ip = IpUtils(app_config, app_logger).get_external_ip()

    connector = GoDaddyConnector(app_config, app_logger)
    dns_ip = connector.fetch_ip_from_dns()
    # Only touch DNS when the stored record is stale.
    if dns_ip != current_ip:
        connector.update_dns(current_ip)
示例#22
0
                    default=5,
                    help='Save model checkpoints every k epochs.')
# Early-stopping / resume options.
# NOTE(review): argparse's type=bool treats ANY non-empty string
# (including "False") as True; action='store_true' is the usual fix.
parser.add_argument('--early_stop', type=bool, default=True)
parser.add_argument('--patience', type=int, default=10)
parser.add_argument('--resume', type=bool, default=False)
parser.add_argument('--resume_path',
                    type=str,
                    default='./saved_models/model_best.pt')
parser.add_argument('--log_step', type=int, default=20)

# other
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--config_file', type=str, default='./config.json')
parser.add_argument('--seed', type=int, default=1234)

logger = Logger()

# Build the run config from the CLI args, echo it, and persist it.
cfg = Config(logger=logger, args=parser.parse_args())
cfg.print_config()
cfg.save_config(cfg.config['config_file'])

# Seed every RNG in play; cudnn is disabled, presumably to avoid
# non-deterministic kernels -- confirm intent.
torch.manual_seed(cfg.config['seed'])
torch.cuda.manual_seed(cfg.config['seed'])
torch.backends.cudnn.enabled = False
np.random.seed(cfg.config['seed'])

# vocab
vocab = load_vocab('dataset/vocab.txt')
tokenizer = Tokenizer(vocab)

# data_loader
示例#23
0
            return
        if datetime.now().hour == self.timer_list[0]:
            self.num = self.timer_list.pop(0)
            logger.info('Emiter {0} task start...'.format(self.num))
            alert = self.emiter()
            alert.append(self.emiter_item(alert[0]))
            logger.info('Emiter a {0}:{1} {2} alert'.format(
                alert[0], alert[1], alert[2]))
            return alert
        if datetime.now().hour > self.timer_list[0]:
            self.timer_list.pop(0)
            logger.info('Poped one point...')

    def action(self):
        """
        Walk the configured alert actions.

        NOTE(review): effectively a stub -- every field of
        task['action'] is unpacked but only `ack` is consulted, and the
        `if ack:` branch does nothing; a missing key raises KeyError.
        """
        tasks = self.actions
        for task in tasks:
            target = task['action']['host']
            item = task['action']['item']
            stime = task['action']['start_time']
            rtime = task['action']['recovery_time']
            ack = task['action']['ack']
            state = task['action']['state']
            if ack:
                pass


if __name__ == '__main__':
    # Logger() is constructed purely for its side effects -- presumably
    # global logging setup; confirm against the Logger implementation.
    Logger()
    a = Emitor('config.yml')
    a.start()
示例#24
0
import argparse
import logging.config
import sys

from config import ELASTICSEARCH_CONNECTION, BASE_PATH, DATA_SOURCES, ALERT_FUNCTION, DB_INFO_ALERT_SETTINGS
from config import LOG_CONFIG, Logger
from sources.collector import Collector

# Apply the file-based logging configuration, then build the runner's
# alerting logger on top of it.
logging.config.fileConfig(LOG_CONFIG)
logger = Logger(logging.getLogger("test_assistant_crawler.runner"),
                ALERT_FUNCTION,
                DB_INFO_ALERT_SETTINGS)

if __name__ == '__main__':

    # CLI switches select maintenance actions; store_const leaves the
    # attribute as None when the flag is absent.
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', action='store_const', const=True)
    parser.add_argument('--save', action='store_const', const=True)
    parser.add_argument('--drop_elastic', action='store_const', const=True)
    parser.add_argument('--migrate', action='store_const', const=True)

    options = parser.parse_args(sys.argv[1:])

    col = Collector(base_path=BASE_PATH, sources=DATA_SOURCES)

    # NOTE(review): only --load is handled in the visible code; --save,
    # --drop_elastic and --migrate are parsed but unused here.
    if options.load:
        logger.info('Action --load triggered', alert=True)

        col.load_all()  # TODO: add a report on load completion with result details

        logger.info('Action --load completed', alert=True)
'''
Created on Aug 9, 2016

@author: root
'''

import config.Logger as logging

log = logging.getLogger()

def extractValueFromStock(stock, value):
    """
    Return the named field of a stock as a float.

    :param stock: object exposing .map (dict of field -> value string)
        and .symbol (used in error messages)
    :param value: field name to look up in stock.map
    :raises ValueError: when the field is not present for this stock
    """
    # IDIOM FIX: `value not in` instead of `not value in`; guard clause
    # avoids the needless else branch.
    if value not in stock.map:
        raise ValueError("The value " + value + " for symbol " + stock.symbol + " is not valid.")
    return float(stock.map[value])
        
        
示例#26
0
文件: spup.py 项目: SonyMobile/spuppy
class Uploader:
    """
    Uploads files to Sharepoint.
    """
    def __init__(self, params):
        # params: parsed CLI options; only the attributes below are read.
        self._debug = params.debug
        self._subsite = params.subsite
        self._logger = Logger("spuppy", "logs", params.debug)
        self._path = params.directory
        self._files = params.files
        self._out = params.out

    def main(self):
        """
        Orchestrate upload of a file using given configurations.

        Builds the Sharepoint/runtime/file configs, validates them, then
        authenticates and uploads. Always returns None; failures are
        reported through the logger.
        """

        spconf = SharepointConfig(self._subsite)
        runtime = RuntimeConfig(self._debug)
        fconf = FilesConfig(self._path, self._files, self._out)
        # verify() yields error messages; empty/falsy means usable config
        verify = fconf.verify()

        if self._debug:
            self._logger.debug(spconf)
            self._logger.debug(runtime)
            self._logger.debug(fconf)

        if verify:
            # configuration problems: log each one and abort the upload
            for v in verify:
                self._logger.error(v)
            return

        if self._debug:
            # NOTE(review): fconf is logged a second time here --
            # presumably verify() mutates it; confirm, otherwise this
            # duplicate debug line can be dropped.
            self._logger.debug(fconf)

        try:
            sp = Sharepoint(fconf, spconf, runtime, self._logger)
            sp.get_token()
            if self._debug:
                self._logger.debug(sp)

            # bail out early when the target URL fails verification
            if not sp.verify_url():
                return

            # create the destination folder when one was requested
            if fconf.out_folder and not sp.add_folder():
                return

            sp.upload_files()
        except Exception as e:  # pylint: disable=broad-except
            # This `except` is intentionally broad to capture it in
            # the log for posterity. main exists anyway.
            self._logger.exception(e)