Exemple #1
0
    def fast_push_file(self, kwargs):
        """Dispatch files quickly through the JobV3 fast-transfer API.

        On success, polls and returns the task status; on API failure,
        logs the error and returns ``None``.
        """
        kwargs.update({"bk_username": self.bk_username})

        resp = JobV3Api.fast_transfer_file(kwargs, raw=True)
        if not resp["result"]:
            # API call failed — record the request and the raw response.
            logger.error(
                build_job_err_log_format(self.bk_username, 'fast_push_file',
                                         kwargs, resp))
            return None

        # Job accepted: poll until the job instance reaches a final state.
        status = self.get_task_result_status({
            "job_instance_id": resp["data"].get("job_instance_id"),
            "bk_biz_id": kwargs.get("bk_biz_id"),
        })
        logger.info(
            build_job_exec_log_format(self.bk_username, 'fast_push_file',
                                      kwargs['task_name']))
        return status
Exemple #2
0
    def create_record_detail(self, task_type):
        """Persist a task record and store the new id on ``self.task_id``.

        @param task_type: internally defined task-flow id; each id maps to a
            task name so the backend can manage tasks uniformly, keep the DB
            column short, and speed up searching.
        @return: ``True`` when the record was created, ``False`` when the
            backing call returned id 0 (creation failed).
        """
        task_info = copy.deepcopy(self.param_info)
        # Strip sensitive fields such as password so they are never persisted.
        # pop(..., None) also removes a present-but-falsy password, which the
        # previous truthiness check would have left in place.
        task_info.pop('password', None)

        # BUGFIX: log the sanitized copy — ``self.param_info`` may still
        # contain the password, which must not leak into the log stream.
        logger.info(task_info)
        self.task_id = create_record_detail(
            {
                "db_type": self.db_type,
                "app_id": self.param_info['app_id'],
                "cluster_name": self.param_info['cluster_name'],
                "task_type": task_type,
                "task_mode": 1,
                "pipeline_id": "000",
                "op_user": self.param_info['bk_username'],
                "task_params": task_info,
            }
        )
        if self.task_id == 0:
            logger.error("task表生成失败,任务结束")
            return False

        return True
Exemple #3
0
    def fast_execute_script(self, kwargs):
        """Execute a script quickly through the JobV3 API.

        On success, polls and returns the task status; on API failure,
        logs the error and returns ``None``.
        """
        # Note: the shell script content must be base64-encoded.
        kwargs.update({"bk_username": self.bk_username})

        call_params = dict(kwargs)
        call_params.update(fast_execute_script_common_kwargs)
        resp = JobV3Api.fast_execute_script(call_params, raw=True)
        if not resp["result"]:
            # API call failed — record the request and the raw response.
            logger.error(
                build_job_err_log_format(self.bk_username,
                                         'fast_execute_script', kwargs,
                                         resp))
            return None

        # Job accepted: poll until the job instance reaches a final state.
        status = self.get_task_result_status({
            "job_instance_id": resp["data"].get("job_instance_id"),
            "bk_biz_id": kwargs.get("bk_biz_id"),
        })
        logger.info(
            build_job_exec_log_format(self.bk_username,
                                      'fast_execute_script',
                                      kwargs['task_name']))
        return status
Exemple #4
0
def home(request):
    """
    @summary: Mobile (WeChat) home page — log the visiting user, render index.
    """
    username = request.user.username                 # BlueKing user
    wx_user = model_to_dict(request.weixin_user)     # WeChat user

    logger.info(f'bk_username:{username}, wx_username:{wx_user}')
    return render(request, 'weixin/index.html')
Exemple #5
0
def consume_omnibus_overtime():
    """Poll every partition of the configured Kafka topics once and persist
    each fetched record via ``into_database``.

    Any failure is logged with a full traceback; the task never raises.
    """
    try:
        topics = KAFKA.get('topics')
        broker_list = KAFKA.get('broker_list')
        consumer = KafkaConsumer(
            bootstrap_servers=broker_list,
            group_id='celery_topic_poll',
            consumer_timeout_ms=20000,
        )
        for topic in topics:
            partitions = consumer.partitions_for_topic(topic)
            logger.info('主题%s对应分区为%s' % (topic, partitions))
            for partition in partitions:
                tp = TopicPartition(topic, partition)
                # Explicitly assign one partition at a time so each poll
                # reads only this partition.
                consumer.assign([tp])
                logger.info(consumer.end_offsets([tp]))
                msg = consumer.poll(timeout_ms=50)
                if not msg:
                    logger.info(str(msg))
                else:
                    records = msg.get(tp)
                    for record in records:
                        into_database(record)
                consumer.commit()
    except Exception as err:
        # BUGFIX: log with traceback at error level instead of swallowing
        # the failure as a plain info message.
        logger.exception(str(err))
Exemple #6
0
def consume_fault_location():
    """Poll every partition of the fault-location topic once and upsert the
    decoded records into ``FaultLocation``.

    Any failure is logged with a full traceback; the task never raises.
    """
    try:
        topic = 'HXB_Fault_Location'
        broker_list = KAFKA.get('broker_list')
        consumer = KafkaConsumer(
            bootstrap_servers=broker_list,
            group_id='celery_fault_location_poll',
            consumer_timeout_ms=20000,
        )
        partitions = consumer.partitions_for_topic(topic)
        logger.info('主题%s对应分区为%s' % (topic, partitions))
        for partition in partitions:
            tp_fault = TopicPartition(topic, partition)
            # Explicitly assign one partition at a time so each poll reads
            # only this partition.
            consumer.assign([tp_fault])
            logger.info(consumer.end_offsets([tp_fault]))
            msg = consumer.poll(timeout_ms=50)
            if not msg:
                logger.info(str(msg))
            else:
                records = msg.get(tp_fault)
                for record in records:
                    logger.info(
                        f'当前记录{record.topic, record.partition, record.offset}')
                    if not os.getenv('BK_ENV'):
                        # SECURITY: eval() executes arbitrary expressions from
                        # the message bus — acceptable only while the topic is
                        # fully trusted; ast.literal_eval would be safer.
                        data = eval(
                            zlib.decompress(base64.b64decode(
                                record.value)).decode('gbk'))
                    else:
                        data = json.loads(record.value.decode('utf-8'))
                    fault_location = FaultLocation.objects.filter(
                        topic=record.topic,
                        partition=int(record.partition),
                        offset=int(record.offset))
                    if fault_location:
                        # A row for this (topic, partition, offset) already
                        # exists: refresh its payload.
                        fault_location[0].offset = int(record.offset)
                        fault_location[0].value = str(data)
                        fault_location[0].save()
                    else:
                        new_fault = FaultLocation(topic=record.topic,
                                                  partition=int(
                                                      record.partition),
                                                  offset=int(record.offset),
                                                  value=str(data))
                        new_fault.save()
            consumer.commit()
    except Exception as err:
        # BUGFIX: log with traceback at error level instead of swallowing
        # the failure as a plain info message.
        logger.exception(str(err))
Exemple #7
0
def consume_omnibus_overtime():
    """Subscribe to the configured Kafka topics and persist every consumed
    record via ``into_database``.

    Unlike the polling variant, this blocks on the consumer iterator
    (no ``consumer_timeout_ms``), so it runs until the stream ends or an
    error occurs.  Any failure is logged with a full traceback.
    """
    try:
        topics = KAFKA.get('topics')
        broker_list = KAFKA.get('broker_list')
        consumer = KafkaConsumer(
            bootstrap_servers=broker_list,
            group_id='api_data_subscribe_omnibus_overtime',
        )
        for topic in topics:
            partitions = consumer.partitions_for_topic(topic)
            logger.info('主题%s对应分区为%s' % (topic, partitions))
        consumer.subscribe(topics=topics)
        for record in consumer:
            into_database(record)
    except Exception as err:
        # BUGFIX: log with traceback at error level instead of swallowing
        # the failure as a plain info message.
        logger.exception(str(err))
Exemple #8
0
    def execution(self, task, task_id, user):
        """Run every quota script for *task* and record progress.

        Creates an ExecuteLog row, executes each quota via
        ``self.execute_script`` and, after the last quota, finalizes the
        result/task/log rows, clears the redis markers and sends a notify.

        @param task: Task model instance whose quotas are executed
        @param task_id: id forwarded to ``send_notify`` — NOTE(review): the
            log row uses ``self.task_id`` instead; confirm both always refer
            to the same task
        @param user: unused here; the log row records ``self.user`` instead
        """

        self.ip_error_list.clear()
        # History row marking this run as "in progress" (exec_state=1).
        exe = ExecuteLog.objects.create(work_code=str(uuid.uuid1()),
                                        exec_state=1,
                                        task_id=self.task_id,
                                        operator=self.user)
        # NOTE(review): refrefsh_quota_list() is also re-evaluated inside the
        # loop for the last-item check — presumably cheap and stable; confirm.
        for index, quota in enumerate(self.refrefsh_quota_list()):
            # One TaskResult row per quota, initially pending (state 0).
            task_result_obj = TaskResult.objects.create(
                task=task,
                task_state=0,
                task_step_state=0,
                quota_id=int(quota['id']),
            )
            task_result = TaskResult.objects.filter(id=task_result_obj.id)
            # Mark the result row as running before executing the script.
            task_result.update(task_state=1)
            self.execute_script(task_result_obj, task_result, quota, exe.id)
            # Last quota in the list: finalize all bookkeeping.
            if index >= len(self.refrefsh_quota_list()) - 1:
                dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                # Result row: finished.
                task_result.update(task_state=2)
                # Task row: no longer executing.
                Task.objects.filter(id=task.id).update(exec_state=0)
                # Drop this task's cached running marker in redis.
                redis_link.hdel('current_task', self.task_id)
                # History row: finished, with end time.
                ExecuteLog.objects.filter(id=exe.id).update(exec_state=2,
                                                            end_time=dt)
                redis_link.delete("current_task")

                # Notify interested parties of the run's outcome.
                notify_result = send_notify(task_id)
                logger.info(
                    f"发送通知 总状态({notify_result['result']}) 发送方式({notify_result['send_type']}) 发送详情({notify_result['send_info']})"
                )
                return
Exemple #9
0
def into_database(record):
    """Decode one compressed Kafka *record* and upsert its alarm items via
    the lens model API.

    Items carrying a ``summary`` field are written to ``EmergencyOmnibus``;
    items carrying a ``key`` field are written to ``EmergencyOvertime``.
    Every item is deduplicated by an md5 hash of its repr (``hash_id``)
    before being POSTed.
    """
    data = zlib.decompress(base64.b64decode(record.value)).decode('gbk')
    if not os.getenv('BK_ENV'):  # dev environment: payload is a python repr
        # SECURITY: eval() executes arbitrary expressions from the message
        # bus — acceptable only while the topic is fully trusted;
        # ast.literal_eval would be safer.
        data = eval(data)
    else:
        data = json.loads(data)
    for item in data:
        # Only leaf items (no children) are persisted.
        if item.get('children'):
            continue
        hash_id = hashlib.md5(
            str(item).encode(encoding='UTF-8')).hexdigest()
        # NOTE: params is intentionally shared between both branches below,
        # mirroring the original data flow.
        params = {'hash_id': hash_id}
        if item.get('summary'):
            model_obj = lens._registry.get(EmergencyOmnibus)
            response = model_obj._api(method='GET', data=params)
            if response['code'] == 1:
                response_data = response.get('data')
                if not response_data.get('items'):
                    # Map incoming keys through the model's field_map.
                    for key, value in item.items():
                        if key in EmergencyOmnibus._meta.field_map.keys():
                            params[EmergencyOmnibus._meta.field_map.get(
                                key)] = value
                        else:
                            params[key] = value
                    # Translate the human-readable operation label back to
                    # its numeric choice value.
                    params['operation'] = int([
                        choice[0] for choice in choices.get(
                            'EmergencyOmnibus_opration')
                        if choice[1] == params['operation']
                    ][0])
                    # Normalize unknown levels to '0'.
                    params['level'] = '0' if params['level'] not in {
                        '10', '20', '40', '50'
                    } else params['level']
                    response = model_obj._api(method='POST', data=params)
                    if response['code'] != 1:
                        # BUGFIX: the response used to be passed as a lazy
                        # %-arg to a message with no placeholder, raising
                        # inside logging and never being emitted.
                        logger.info(
                            f'综合类记录{record.topic, record.partition, record.offset}写入失败 %s',
                            str(response))
                elif response_data.get('items')[0].get(
                        'hash_id') == hash_id:
                    logger.info(
                        f'综合类记录{hash_id}已存在{record.topic, record.partition, record.offset}'
                    )
            else:
                logger.info(str(response))
        if item.get('key'):
            model_obj = lens._registry.get(EmergencyOvertime)
            response = model_obj._api(method='GET', data=params)
            if response['code'] == 1:
                response_data = response.get('data')
                if not response_data.get('items'):
                    # Map incoming keys through the model's field_map.
                    for key, value in item.items():
                        if key in EmergencyOvertime._meta.field_map.keys():
                            params[EmergencyOvertime._meta.field_map.get(
                                key)] = value
                        else:
                            params[key] = value
                    # 'code' / 'rtcode_cn' arrive as "code:label" pairs —
                    # split them into separate code / label fields.
                    code_code_cn = params.pop('code')
                    rtcode_rtcode_cn = params.pop('rtcode_cn')
                    params['code'] = code_code_cn.split(':')[0]
                    params['code_cn'] = code_code_cn.split(':')[1]
                    params['rtcode'] = rtcode_rtcode_cn.split(':')[0]
                    params['rtcode_cn'] = rtcode_rtcode_cn.split(':')[1]
                    response = model_obj._api(method='POST', data=params)
                    if response['code'] != 1:
                        # BUGFIX: same dropped-response logging defect as
                        # the omnibus branch above.
                        logger.info(
                            f'交易类记录{record.topic, record.partition, record.offset}写入失败 %s',
                            str(response))
                elif response_data.get('items')[0].get(
                        'hash_id') == hash_id:
                    logger.info(
                        f'交易类记录{hash_id}已存在{record.topic, record.partition, record.offset}'
                    )
            else:
                logger.info(str(response))
Exemple #10
0
def myFunc(request):
    """Demo view: emit one message at each log level, then render index."""
    emitters = (
        (logger.info, "这是提示信息"),
        (logger.debug, "这是debug信息"),
        (logger.warning, "这是警告信息"),
        (logger.error, '这是错误信息'),
    )
    for emit, text in emitters:
        emit(text)
    return render(request, 'mytestapp/index.html', {})
Exemple #11
0
 def info(msg, *args, **kwargs):
     """Forward *msg* to the BlueKing logger; also echo to stdout in dev."""
     bk_logger.info(msg, *args, **kwargs)
     if not ISDEV:
         return
     print(msg, *args)