def main(project_name):

    logger = Logger('_02_valid_model_{}'.format(project_name))
    logger.info('=' * 50)

    model_path = '_model/embedding_model_{}.pt'.format(project_name)
    logger.info('load model from {}'.format(model_path))
    model = torch.load(model_path)

    evaluator = Evaluator()
    evaluator.evaluate(model)
Example #2
def main(project_name):

    tic = time.time()

    logger = Logger('_03_embed_index_{}'.format(project_name))
    logger.info('=' * 50)

    model_path = '_model/embedding_model_{}.pt'.format(project_name)
    logger.info('load model from {}'.format(model_path))
    model = torch.load(model_path)
    model.eval()

    dir_target = '../../input_large_delf/index'
    embedder = ImgEmbedder(model, dir_target)

    f = 512
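    # Annoy approximate-nearest-neighbour index over the 512-dimensional embeddings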
    t = AnnoyIndex(f, metric='euclidean')

    target_files = os.listdir(dir_target)

    print(len(target_files))
    print(len(os.listdir('../../input/index')))

    # every index image should have a corresponding .delf feature file
    assert len(target_files) == len(os.listdir('../../input/index'))
    assert all(target_file.endswith('.delf') for target_file in target_files)

    num_index = len(target_files)

    index_names = list()

    logger.info('===> embed index images')

    for i in tqdm(range(num_index)):

        target_file = target_files[i]
        index_names.append(target_file[:-5])

        # for p in range(3):
        p = 0
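        # item ids are spaced by num_index so up to three crop positions per image can share one index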
        img_feature = embedder.get_vector(target_file[:-5], pos=p)
        t.add_item(i + num_index * p, img_feature.tolist())

    dir_index = '_embed_index'
    os.makedirs(dir_index, exist_ok=True)

    with open(
            os.path.join(dir_index,
                         'index_names_{}.json'.format(project_name)),
            'w') as f:
        json.dump(index_names, f)

    t.build(100)  # build 100 trees; more trees improve query accuracy at the cost of index size
    t.save(
        os.path.join(dir_index, 'index_features_{}.ann'.format(project_name)))

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
Example #3
def main(project_name):

    tic = time.time()

    logger = Logger('_03_embed_index_{}'.format(project_name))
    logger.info('=' * 50)

    model_path = '_model/embedding_model_{}.pt'.format(project_name)
    logger.info('load model from {}'.format(model_path))
    model = torch.load(model_path)
    model.eval()

    dir_target = '../../input_large_delf/index'
    logger.info('head of target files: {}'.format(
        str(os.listdir(dir_target)[:5])))

    embedder = ImgEmbedder(model, dir_target)

    f = 512
    t = AnnoyIndex(f, metric='euclidean')

    target_files = os.listdir(dir_target)

    # num_index = 1000
    # warnings.warn('[!] debug mode')
    num_index = len(target_files)

    index_names = list()

    logger.info('===> embed index images')

    for i in tqdm(range(num_index)):

        target_file = target_files[i]
        index_names.append(target_file[:-5])

        img_feature = embedder.get_vector(target_file)
        t.add_item(i, img_feature.tolist())

    dir_index = '_embed_index'
    os.makedirs(dir_index, exist_ok=True)

    with open(os.path.join(dir_index, 'index_names_{}.json'.format(project_name)), 'w') as f:
        json.dump(index_names, f)

    t.build(100)
    t.save(os.path.join(dir_index, 'index_features_{}.ann'.format(project_name)))

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
Example #4
def main(project_name):

    tic = time.time()

    logger = Logger('_04_embed_test_{}'.format(project_name))
    logger.info('=' * 50)

    model_path = '_model/embedding_model_{}.pt'.format(project_name)
    logger.info('load model from {}'.format(model_path))
    model = torch.load(model_path)
    model.eval()

    dir_target = '../../input/test'
    embedder = ImgEmbedder(model, dir_target)

    f = 512
    t = AnnoyIndex(f, metric='euclidean')

    target_files = os.listdir(dir_target)

    num_index = len(target_files)

    index_names = list()

    logger.info('===> embed test images')

    for i in tqdm(range(num_index)):

        target_file = target_files[i]
        index_names.append(target_file[:-4])

        # average the embeddings of three crop positions into a single descriptor
        img_feature = sum(
            embedder.get_vector(target_file[:-4], pos=p)
            for p in range(3)) / 3.0
        t.add_item(i, img_feature.tolist())

    dir_index = '_embed_index'
    os.makedirs(dir_index, exist_ok=True)

    with open(
            os.path.join(dir_index, 'test_names_{}.json'.format(project_name)),
            'w') as f:
        json.dump(index_names, f)

    t.build(100)
    t.save(os.path.join(dir_index,
                        'test_features_{}.ann'.format(project_name)))

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
Example #5
def main(project_name, aux_project_name):

    logger = Logger('_03_make_submission_{}'.format(project_name))
    logger.info('=' * 50)

    tic = time.time()

    project_name = project_name + '_' + aux_project_name

    sample_submission = pd.read_csv('../../dataset/sample_submission.csv')

    images = list()

    test_id_list = sample_submission.id

    logger.info('===> embed test images and get nearest neighbors')

    manager = Manager()
    return_dict = manager.dict()
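    # worker processes write their partial results into return_dict, keyed by process index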

    num_processor = 8
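    # split the test ids into num_processor chunks whose sizes differ by at most one and sum to len(test_id_list)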

    chunk_sizes = [(len(test_id_list) + i) // num_processor for i in range(num_processor)]
    processor_target = 0

    list_processors = list()

    for p in range(num_processor):

        pr = Process(target=process,
                     args=(project_name,
                           test_id_list[processor_target:processor_target + chunk_sizes[p]],
                           p, return_dict))

        list_processors.append(pr)
        processor_target += chunk_sizes[p]

    for p in range(num_processor):
        list_processors[p].start()

    for p in range(num_processor):
        list_processors[p].join()

    for p in range(num_processor):
        images.extend(return_dict[p])

    assert len(images) == len(test_id_list)

    submission = pd.DataFrame(test_id_list, columns=['id'])
    submission['images'] = images

    output_path = '../../submission/submission_{}.csv'.format(project_name)
    submission.to_csv(output_path, index=False)

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
Example #6
def main(project_name, aux_project_name):

    tic = time.time()

    logger = Logger('_01_embed_index_{}'.format(project_name))
    logger.info('=' * 50)

    dir_prj = os.path.join('..', project_name[:-7])
    dir_aux = os.path.join('..', aux_project_name[:-7])

    with open(
            os.path.join(dir_prj, '_embed_index',
                         'index_names_{}.json'.format(project_name)),
            'r') as f:
        prj_index_names = json.load(f)

    with open(
            os.path.join(dir_aux, '_embed_index',
                         'index_names_{}.json'.format(aux_project_name)),
            'r') as f:
        aux_index_names = json.load(f)

    prj_u = AnnoyIndex(512, metric='euclidean')
    prj_u.load(
        os.path.join(dir_prj, '_embed_index',
                     'index_features_{}.ann'.format(project_name)))

    aux_u = AnnoyIndex(512, metric='euclidean')
    aux_u.load(
        os.path.join(dir_aux, '_embed_index',
                     'index_features_{}.ann'.format(aux_project_name)))

    logger.info('===> embed index images')

    index_names, t = merge_index(prj_index_names, prj_u, aux_index_names,
                                 aux_u)

    dir_index = '_embed_index'
    os.makedirs(dir_index, exist_ok=True)

    new_prj_name = project_name + '_' + aux_project_name

    with open(
            os.path.join(dir_index,
                         'index_names_{}.json'.format(new_prj_name)),
            'w') as f:
        json.dump(index_names, f)

    t.build(100)
    t.save(
        os.path.join(dir_index, 'index_features_{}.ann'.format(new_prj_name)))

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
Example #7
def main(project_name):

    tic = time.time()

    logger = Logger('_01_training_{}'.format(project_name))

    logger.info('==> initialize model')
    embedding = build_model(pretrained=True)

    logger.info('==> train model')
    train(embedding, project_name=project_name)

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
Example #8
def main(project_name):

    tic = time.time()

    logger = Logger('_01_training_{}'.format(project_name))

    logger.info('==> initialize model')
    embedding = build_model()

    # additional training
    # logger.info('==> load model')
    # embedding = torch.load(os.path.join('_model', '*****.pt'))

    logger.info('==> train model')
    train(embedding, project_name=project_name)

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
Example #9
def main(project_name):

    tic = time.time()

    logger = Logger('_04_make_submission_{}'.format(project_name))
    logger.info('=' * 50)

    model_path = '_model/embedding_model_{}.pt'.format(project_name)
    logger.info('load model from {}'.format(model_path))
    model = torch.load(model_path)
    model.eval()

    dir_target = '../../input_large_delf/test'
    logger.info('head of target files: {}'.format(
        str(os.listdir(dir_target)[:5])))

    embedder = ImgEmbedder(model, dir_target)

    sample_submission = pd.read_csv('../../dataset/sample_submission.csv')

    images = list()

    with open(
            os.path.join('_embed_index',
                         'index_names_{}.json'.format(project_name)),
            'r') as f:
        index_names = json.load(f)

    # test_id_list = sample_submission.id[:1000]
    # warnings.warn('[!] debug mode')
    test_id_list = sample_submission.id

    f = 512
    u = AnnoyIndex(f, metric='euclidean')
    u.load(
        os.path.join('_embed_index',
                     'index_features_{}.ann'.format(project_name)))

    logger.info('===> embed test images and get nearest neighbors')

    search_k = 1000000  # number of index nodes Annoy inspects per query; larger is more accurate but slower

    for test_id in tqdm(test_id_list):

        target_file = '{}.delf'.format(test_id)

        try:
            img_feature = embedder.get_vector(target_file)
            indeces = u.get_nns_by_vector(img_feature.tolist(),
                                          n=100,
                                          search_k=search_k)
        except Exception:
            # fall back to an arbitrary fixed ranking if embedding or search fails
            indeces = list(range(100))

        names = [index_names[index] for index in indeces]

        images.append(' '.join(names))

    submission = pd.DataFrame(test_id_list, columns=['id'])
    submission['images'] = images

    output_path = '../../submission/submission_{}.csv'.format(project_name)
    submission.to_csv(output_path, index=False)

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
Example #10
def main():

    logger = Logger('coord_ica')

    list_mean_x = list()
    list_mean_y = list()
    list_height = list()
    list_aspect_ratio = list()
    list_rotate = list()

    num_error = 0
    num_zero_ship = 0

    os.makedirs('_error_imgs', exist_ok=True)

    sse_array = np.array([])

    for i, image_id in tqdm(enumerate(segmentations.ImageId), total=len(segmentations)):
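        # decode the RLE mask, fit oriented-box coordinates to it, and keep the fit only if the reconstruction matches the mask well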

        encoded = segmentations.iloc[i, 1]

        if encoded == '':

            list_mean_x.append(np.nan)
            list_mean_y.append(np.nan)
            list_height.append(np.nan)
            list_aspect_ratio.append(np.nan)
            list_rotate.append(np.nan)
            num_zero_ship += 1
            continue

        truth_img = rle_decode(encoded)

        reconst_img = np.zeros(truth_img.shape)  # initialize
        matching_degree = 0.0  # ensure it is defined even when fitting fails below

        threshold_iter = 0.95
        threshold_last = 0.6

        truth_img_norm = truth_img / 255.0

        try:

            mean_x, mean_y, height, aspect_ratio, rotate, img_size = img2_coord_iter(truth_img_norm, threshold_iter)
            reconst_img = coord2_img(mean_x, mean_y, height, aspect_ratio, rotate, img_size)
            reconst_img_norm = reconst_img / 255.0

            sse = np.sum((reconst_img_norm - truth_img_norm) ** 2)
            sse_array = np.append(sse_array, sse)

            area_intersect = np.sum(truth_img_norm * reconst_img_norm)
            area_union = np.sum(truth_img_norm) + np.sum(reconst_img_norm) - area_intersect
            matching_degree = area_intersect / area_union

            if matching_degree < threshold_last:
                logger.info('[{}] sse: {} matching_degree: {}'.format(image_id, sse, matching_degree))
                raise RuntimeError

            list_mean_x.append(mean_x)
            list_mean_y.append(mean_y)
            list_height.append(height)
            list_aspect_ratio.append(aspect_ratio)
            list_rotate.append(rotate)

        except (RuntimeError, ValueError):

            num_error += 1

            list_mean_x.append(np.nan)
            list_mean_y.append(np.nan)
            list_height.append(np.nan)
            list_aspect_ratio.append(np.nan)
            list_rotate.append(np.nan)

        if matching_degree < threshold_last:

            try:
                Image.fromarray(reconst_img).save(
                    os.path.join('_error_imgs', image_id[:-4] + '_deg{:.3f}_re.png'.format(matching_degree)))
                Image.fromarray(truth_img).save(
                    os.path.join('_error_imgs', image_id[:-4] + '_deg{:.3f}_truth.png'.format(matching_degree)))
            except Exception:
                # saving the debug images is best-effort
                pass

    logger.info('mean of reconstruct error: {:.3f}'.format(np.mean(sse_array)))

    logger.info('num zero ship: {0:d} / {1:d}'.format(num_zero_ship, len(segmentations)))
    logger.info('num_error: {0:d} / {1:d}'.format(num_error, len(segmentations)))

    result = pd.DataFrame()
    result['ImageID'] = segmentations.ImageId
    # note: mean_y feeds x and mean_x feeds y (array row/col order vs. image x/y)
    result['x'] = list_mean_y
    result['y'] = list_mean_x
    result['height'] = list_height
    result['width'] = [height / ratio for height, ratio in zip(list_height, list_aspect_ratio)]
    result['rotate'] = list_rotate

    result.to_csv('../../input/coordinates.csv', index=False, float_format='%.4f')
Example #11
class Assertions:
    """
    @summary  封装assert模块
    """
    def __init__(self):
        self.log = Logger()

    def assert_status_code(self, rq_status_code, ep_status_code):
        """
        @summary  校验返回状态码
        @param    rq_status_code  nu  实际请求码
        @param    ep_status_code  nu  期望请求码
        """
        # try:
        #     assert rq_status_code == ep_status_code
        #     return True
        # except Exception:
        #     self.log.error('请求status_code返回值是{},和预期结果{}不符合'.format(rq_status_code, ep_status_code))
        #     return False
        self.log.info('请求status_code返回值是:{},预期结果是:{}'.format(
            rq_status_code, ep_status_code))
        assert rq_status_code == ep_status_code

    def assert_json_value(self, rq_json_value, ep_json_value):
        """
        @summary  校验json中键对应的值是否正确
        @param    rq_json_value  任意类型的值  实际值
        @param    ep_json_value  任意类型的值  期望值
        """
        # try:
        #     assert rq_json_value == ep_json_value
        #     return True
        # except Exception as e:
        #     self.log.error('请求返回值为{},和预期结果{}不符合'.format(rq_json_value, ep_json_value))
        #     return False
        self.log.info('请求返回值为:{},预期结果为:{}'.format(rq_json_value,
                                                  ep_json_value))
        assert rq_json_value == ep_json_value

    def assert_json_list_len(self, rq_json_list_len, ep_json_list_len, sumbol):
        """
        @summary  校验json中键对应list的长度
        @param    rq_json_list_len  nu  实际长度
        @param    ep_json_list_len  nu  期望长度
        @param    sumbol 比较运算符  
        """
        mappings = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '==': operator.eq,
            '!=': operator.ne
        }
        # try:
        #     assert mappings[sumbol](rq_json_list_len, ep_json_list_len)
        #     return True
        # except Exception:
        #     self.log.error('请求结果返回长度为{},期望结果为{},不符合预期'.format(rq_json_list_len, ep_json_list_len))
        #     return False
        self.log.info('请求结果返回长度为:{},期望结果为:{}'.format(rq_json_list_len,
                                                     ep_json_list_len))
        assert mappings[sumbol](rq_json_list_len, ep_json_list_len)

    def assert_json_value_not_null(self, rq_json_value):
        """
        @summary  校验json中键对应的值是否为空
        @param    rq_json_value  任意格式  实际值
        """
        # try:
        #     assert rq_json_value != null
        #     return True
        # except Exception as e:
        #     self.log.error('请求结果返回值为空,不符合预期')
        #     return False
        self.log.info('字段返回值为:{}'.format(rq_json_value))
        assert rq_json_value != ''
Example #12
def process(project_name, target_id_list, procnum, return_dict):

    logger = Logger('_05_make_submission_{}'.format(project_name))
    logger.info('=' * 50)

    images = list()

    with open(
            os.path.join('_embed_index',
                         'index_names_{}.json'.format(project_name)),
            'r') as f:
        index_names = json.load(f)

    with open(
            os.path.join('_embed_index',
                         'test_names_{}.json'.format(project_name)), 'r') as f:
        test_names = json.load(f)

    num_index = len(index_names)

    f = 512
    u = AnnoyIndex(f, metric='euclidean')
    u.load(
        os.path.join('_embed_index',
                     'index_features_{}.ann'.format(project_name)))

    db_test = AnnoyIndex(f, metric='euclidean')
    db_test.load(
        os.path.join('_embed_index',
                     'test_features_{}.ann'.format(project_name)))

    logger.info('===> embed test images and get nearest neighbors')

    search_k = 100_000

    for test_id in tqdm(target_id_list):

        # main query

        try:
            db_index = test_names.index(test_id)
            img_feature = db_test.get_item_vector(db_index)

            indeces, dists = u.get_nns_by_vector(img_feature,
                                                 n=300,
                                                 search_k=search_k,
                                                 include_distances=True)
        except ValueError:
            logger.info('{}: ValueError error'.format(test_id))
            indeces, dists = list(range(300)), np.ones(300).tolist()

        indeces_init = list(indeces)
        dists_init = list(dists)

        # sub query

        list_dict = list()
        sub_id_selected = list()

        num_sub_query = 3
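        # query expansion: repeatedly take the current top hit, search the index around it,
        # and blend its distances into the initial ranking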
        for j in range(num_sub_query):

            sub_id = indeces[0]
            sub_id_selected.append(sub_id)

            # search from index
            indeces_exp, dists_exp = u.get_nns_by_item(sub_id,
                                                       n=600,
                                                       search_k=search_k,
                                                       include_distances=True)

            # ids not returned by this sub-query default to the worst (largest) distance observed
            d = defaultdict(lambda: float(dists_exp[-1]))

            for key, dist_exp in zip(indeces_exp, dists_exp):
                d[key] = dist_exp

            # add result of sub query
            list_dict.append(d)

            # take average by initial query and current sub queries
            indeces, dists = dist_average(indeces_init, dists_init, list_dict)

            # remove selected sub_ids
            indeces, dists = zip(*[(_id, _dist)
                                   for _id, _dist in zip(indeces, dists)
                                   if _id not in sub_id_selected])

        # merge selected sub_ids and sorted other sub_ids
        indeces = sub_id_selected + list(indeces)

        # map item ids back to image ids (ids may be laid out as i + num_index * p per crop position)
        indeces = [index % num_index for index in indeces]

        names = [index_names[index] for index in indeces]
        names = unique_order_preserving(names)[:100]

        images.append(' '.join(names))

    return_dict[procnum] = images
Example #13
class BaseClient(object):
    def __new__(cls, *args, **kwargs):
        if cls is not __class__:
            return super().__new__(cls)
        else:
            raise TypeError('The base class must not be instantiated')  # exec_shell_cmd is not implemented on the base class

    def __init__(self):
        self.logger = Logger()
        self.test_mode = getattr(settings, 'TEST_MODE', False)
        self.asset_api = settings.ASSET_API
        self.key = settings.KEY
        self.key_header_name = settings.AUTH_KEY_NAME
        self.key_header = self.auth_key()

    @classmethod
    def exec_shell_cmd(cls, cmd, hostname):
        raise NotImplementedError(
            'Implement exec_shell_cmd in a subclass: it runs the command on the target asset machine and returns the result')

    def process(self):
        raise NotImplementedError('Implement process in a subclass: it collects the assets and posts them to the API')

    def auth_key(self):
        """ 接口认证 """
        ha = hashlib.md5(self.key.encode('utf-8'))
        time_span = time.time()
        ha.update(bytes("{}|{}".format(self.key, time_span), encoding='utf-8'))
        encryption = ha.hexdigest()
        result = "{}|{}".format(encryption, time_span)
        return {self.key_header_name: result}

    def get_asset(self, hostname):
        response = BaseResponse()

        ret = {}
        entries = asset.get_asset_entries()
        for asset_name, method in entries.items():
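            # each entry maps an asset name to (shell commands to run, a parser for their combined output)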
            asset_resp = BaseResponse()
            cmds, parse_method = method
            lst = []
            try:
                for cmd in cmds:
                    lst.append(self.exec_shell_cmd(cmd, hostname))
                asset_resp.data = parse_method('\n'.join(lst))
            except Exception:
                print('------------------', asset_name)
                msg = '{} {} plugin error: {}'.format(hostname, asset_name,
                                                      traceback.format_exc())
                self.logger.error(msg)
                asset_resp.status = False
                asset_resp.error = msg
            ret[asset_name] = asset_resp

        response.data = ret
        return response

    def post_asset(self,
                   name,
                   data,
                   callback=None):  # TODO: response-class instances inside data must be serialized before sending
        """ Submit data to the API """
        print('-------------------:', name, type(Json.dumps(data)),
              Json.dumps(data))
        try:
            response = requests.post(
                url=self.asset_api,
                headers=self.key_header,
                json=Json.dumps(data),
            )
            status = True
        except Exception as e:
            print(e)
            response = e
            status = False
        if callback:
            callback(status, response)

    def callback(self, status, response):
        """
            提交资产后的回调函数
        :param status:      请求是否成功
        :param response:    请求成功,返回的是响应报文; 请求失败,则是异常对象
        :return:
        """
        if not status:
            self.logger.error(str(response))
            return
        ret = json.loads(response.text)
        if ret['code'] == 1000:
            self.logger.info(ret['message'])
        else:
            self.logger.error(ret['message'])
Example #14
class Config:
    # titles:
    TITLE_DEBUG = "english"
    TITLE_DB = "monkey_test_db"
    TITLE_AI = "test_monkey_ai"
    TITLE_OA = "test_platform_oa"
    TITLE_TECH_DB = "monkey_test_tech_db"
    TITLE_Grow = "test_grow_url"

    # values:
    # [debug\release]
    VALUE_URL_BASE = "url_base"

    #[monkey_test_db/monkey_test_tech_db]
    VALUE_DB_HOST = "host"
    VALUE_DB_PORT = "port"
    VALUE_DB_USER = "******"
    VALUE_DB_PASSWORD = "******"
    VALUE_DB_CHARSET = "charset"

    # [test_monkey_ai]
    VALUE_AI_URL_BASE = "url_base"
    VALUE_AI_URL_BASE_SMS = "url_base_sms"

    # [test_monkey_oa]
    VALUE_OA_URL_BASE = "url_base"

    #[test_grow_url]
    Value_Grow_Base = "test_base_url"

    def __init__(self):
        """
        初始化
        """
        self.log = Logger()
        self.config = configparser.RawConfigParser()
        # self.log = Log.MyLog()
        # path
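        # locate the project root (the 'mts' directory) from the current working directory; try Windows separators first, then POSIX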
        try:
            PATH_LIST = os.getcwd().split("\\")
            self.log.info('Execution path: {}'.format(PATH_LIST))
            PATH = "\\".join(PATH_LIST[:PATH_LIST.index('mts') + 1])
        except Exception:
            PATH_LIST = os.getcwd().split("/")
            self.log.info('Execution path: {}'.format(PATH_LIST))
            PATH = "/".join(PATH_LIST[:PATH_LIST.index('mts') + 1])
        self.log.info('Root path: {}'.format(PATH))
        try:
            self.conf_path = os.path.join(PATH, 'conf\\conf.ini')
            os.stat(self.conf_path)
        except Exception:
            self.conf_path = os.path.join(PATH, 'conf/conf.ini')
            os.stat(self.conf_path)
        if not os.path.exists(self.conf_path):
            raise FileNotFoundError("Make sure the configuration file exists!")

        self.config.read(self.conf_path, encoding='utf-8')

        self.db_host = self.get_conf(Config.TITLE_DB, Config.VALUE_DB_HOST)
        self.db_port = self.get_conf(Config.TITLE_DB, Config.VALUE_DB_PORT)
        self.db_user = self.get_conf(Config.TITLE_DB, Config.VALUE_DB_USER)
        self.db_password = self.get_conf(Config.TITLE_DB,
                                         Config.VALUE_DB_PASSWORD)
        self.db_charset = self.get_conf(Config.TITLE_DB,
                                        Config.VALUE_DB_CHARSET)

        self.tech_db_host = self.get_conf(Config.TITLE_TECH_DB,
                                          Config.VALUE_DB_HOST)
        self.tech_db_port = self.get_conf(Config.TITLE_TECH_DB,
                                          Config.VALUE_DB_PORT)
        self.tech_db_user = self.get_conf(Config.TITLE_TECH_DB,
                                          Config.VALUE_DB_USER)
        self.tech_db_password = self.get_conf(Config.TITLE_TECH_DB,
                                              Config.VALUE_DB_PASSWORD)
        self.tech_db_charset = self.get_conf(Config.TITLE_TECH_DB,
                                             Config.VALUE_DB_CHARSET)

        self.url_base = self.get_conf(Config.TITLE_DEBUG,
                                      Config.VALUE_URL_BASE)

        self.ai_url_base = self.get_conf(Config.TITLE_AI,
                                         Config.VALUE_AI_URL_BASE)
        self.ai_url_base_sms = self.get_conf(Config.TITLE_AI,
                                             Config.VALUE_AI_URL_BASE_SMS)

        self.oa_url_base = self.get_conf(Config.TITLE_OA,
                                         Config.VALUE_OA_URL_BASE)

        self.grow_url_base = self.get_conf(Config.TITLE_Grow,
                                           Config.Value_Grow_Base)

    def get_conf(self, title, value):
        """
        配置文件读取
        :param title:
        :param value:
        :return:
        """
        return self.config.get(title, value)

    def set_conf(self, title, value, text):
        """
        配置文件修改
        :param title:
        :param value:
        :param text:
        :return:
        """
        self.config.set(title, value, text)
        with open(self.conf_path, "w+") as f:
            return self.config.write(f)

    def add_conf(self, title):
        """
        配置文件添加
        :param title:
        :return:
        """
        self.config.add_section(title)
        with open(self.conf_path, "w+") as f:
            return self.config.write(f)
Example #15
class RequestBase(object):
    """
    @summary 封装request模块
    """
    def __init__(self):
        self.log = Logger()
        #self.cookid = dict(anonymid=jk63khrk-y97r4p; _r01_=1; [email protected])

    def get_json(self, **kw):
        """
        @summary  get请求返回结果处理为json
        @param    kw    request请求的参数
        @return   返回处理后的请求结果 
        """
        try:
            request = requests.get(**kw)
            result = request.json()
            self.log.info('GET请求返回json为:\n{}'.format(result))
            return result
        except Exception:
            self.log.error('发送GET请求,json结果解析失败,错误堆栈:\n{}'.format(
                traceback.format_exc()))

    def post_json(self, **kw):
        """
        @summary  Send a POST request and parse the response as JSON
        @param    kw    arguments passed through to requests
        @return   the parsed response
        """
        try:
            request = requests.post(**kw)
            result = request.json()
            self.log.info('POST request returned JSON:\n{}'.format(result))
            return result
        except Exception:
            self.log.error('POST request failed to parse JSON, traceback:\n{}'.format(
                traceback.format_exc()))

    def get_status_code(self, **kw):
        """
        @summary  Send a GET request and return its status code
        @param    kw    arguments passed through to requests
        @return   the status code
        """
        try:
            request = requests.get(**kw)
            result = request.status_code
            self.log.info('GET request returned status_code:\n{}'.format(result))
            return result
        except Exception:
            self.log.error('GET request failed to get the status code, traceback:\n{}'.format(
                traceback.format_exc()))

    def post_status_code(self, **kw):
        """
        @summary  Send a POST request and return its status code
        @param    kw    arguments passed through to requests
        @return   the status code
        """
        try:
            request = requests.post(**kw)
            result = request.status_code
            self.log.info('POST request returned status_code:\n{}'.format(result))
            return result
        except Exception:
            self.log.error('POST request failed to get the status code, traceback:\n{}'.format(
                traceback.format_exc()))

    def get_text(self, **kw):
        """
        @summary  Send a GET request and return the response as text
        @param    kw    arguments passed through to requests
        @return   the response text
        """
        try:
            request = requests.get(**kw)
            result = request.text
            self.log.info('GET request returned text:\n{}'.format(result))
            return result
        except Exception:
            self.log.error('GET request failed to read text, traceback:\n{}'.format(
                traceback.format_exc()))

    def post_text(self, **kw):
        """
        @summary  Send a POST request and return the response as text
        @param    kw    arguments passed through to requests
        @return   the response text
        """
        try:
            request = requests.post(**kw)
            result = request.text
            self.log.info('POST request returned text:\n{}'.format(result))
            return result
        except Exception:
            self.log.error('POST request failed to read text, traceback:\n{}'.format(
                traceback.format_exc()))

    def post_multipart(self, url, file_path):
        """
        @summary  Upload a file via a multipart POST
        @param    url        str  request url
        @param    file_path  path of the file to upload
        @return   the response
        """
        try:
            # open the file in binary mode for the multipart upload
            files = {'file': open(file_path, 'rb')}
            result = requests.post(url=url, files=files)
            return result
        except Exception:
            self.log.error('POST multipart upload failed, traceback:\n{}'.format(
                traceback.format_exc()))

    def delete_json(self, **kw):
        """
        @summary  Send a DELETE request and parse the response as JSON
        @param    kw    arguments passed through to requests
        @return   the parsed response
        """
        try:
            request = requests.delete(**kw)
            result = request.json()
            self.log.info('DELETE request returned JSON:\n{}'.format(result))
            return result
        except Exception:
            self.log.error('DELETE request failed to parse JSON, traceback:\n{}'.format(
                traceback.format_exc()))