Example #1
def get_analysis_results(analysis):
    # Fetch the results of an analysis task
    url, success_code, error_code = get_task_interface('result_uri')
    serial_number = analysis.serial_number
    payload = {
        'serial_number': serial_number,
    }
    try:
        resp = requests.post(url,
                             data=json.dumps(payload),
                             headers=get_headers(),
                             timeout=60)
        results = json.loads(resp.text)
        if results.get('code', False):
            if str(results['code']) == success_code:
                api_logger.debug(f'Successfully fetched analysis task results, response: {results}')
                return True, results['code'], results['messages'], results['results']
            if str(results['code']) == error_code:
                api_logger.warning(f'Failed to fetch analysis task results, response: {results}')
                return False, results['code'], results['messages'], results['results']
            api_logger.warning(f'Fetching analysis task results returned an unknown response code, response: {results}')
            return False, 400, 'Fetching analysis task results returned an unknown response code', results
        else:
            api_logger.warning(f'No response code found when fetching analysis task results, response: {results}')
            return False, 400, 'No response code found when fetching analysis task results', results
    except Exception as request_error:
        error_messages = f'Error while requesting analysis task results: {request_error}'
        api_logger.error(error_messages)
        return False, 500, error_messages, {}
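
A minimal caller sketch for the function above, assuming `analysis` is a model instance with a `serial_number` field; `handle_results` is a hypothetical downstream handler, not part of the original code:

def process_analysis(analysis):
    # Unpack the (success, code, messages, results) tuple returned above.
    success, code, messages, results = get_analysis_results(analysis)
    if success:
        handle_results(results)  # hypothetical: replace with the real consumer
    else:
        api_logger.warning(f'Could not fetch results (code {code}): {messages}')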
Example #2
    def post(self, request, *args, **kwargs):
        username = request.data.get('username')
        password = request.data.get('password')
        if not username:
            result = {'success': False, 'messages': 'A username is required, please send one!'}
            return Response(result, status=status.HTTP_400_BAD_REQUEST)
        if not password:
            result = {'success': False, 'messages': 'A password is required, please send one!'}
            return Response(result, status=status.HTTP_400_BAD_REQUEST)
        user = User.objects.filter(username=username).first()
        if user:
            if not user.is_active or user.is_deleted:
                result = {'success': False, 'messages': 'This user is disabled or deleted, please use another account!'}
                return Response(result, status=status.HTTP_403_FORBIDDEN)
            login_user = authenticate(username=username.strip(), password=password)
            if login_user is None:
                result = {'success': False, 'messages': 'Wrong password for user {}, please enter the correct password!'.format(username)}
                return Response(result, status=status.HTTP_403_FORBIDDEN)
        else:
            result = {'success': False, 'messages': 'User {} does not exist or is invalid, please enter a valid username!'.format(username)}
            return Response(result, status=status.HTTP_403_FORBIDDEN)
        # Authentication succeeded
        token = get_jwt_token(login_user)
        response_data = jwt_response_payload_handler(token, login_user, request)
        response = Response(response_data, status=status.HTTP_200_OK)
        # Record an action log entry
        api_logger.debug(f'{login_user} logged in')
        action_log(request=request, user=login_user, action_type=AUTH_ACTION_TYPE, old_instance=None,
                   instance=login_user, action_info='Logged in with username and password')
        return response
Example #3
    def valid(self, request, *args, **kwargs):
        """Check whether the current user is a member of the work zone."""
        query_params = self.request.query_params
        work_zone_id = query_params.get('work_zone', False)
        api_logger.debug(f'Got work_zone_id: {work_zone_id}')
        try:
            work_zone = WorkZone.objects.get(pk=int(work_zone_id))
        except (WorkZone.DoesNotExist, TypeError, ValueError):
            work_zone = None
        if work_zone:
            work_zone_member = WorkZoneMember.objects.filter(
                work_zone=work_zone, user=request.user).first()
            api_logger.debug(f'Got work_zone_member: {work_zone_member}')
            if work_zone_member:
                result = {
                    'success': True,
                    'messages': 'The current user is a member of this work zone!',
                    'results': {}
                }
                return Response(result, status=status.HTTP_200_OK)
            else:
                result = {
                    'success': False,
                    'messages': 'The current user is not a member of this work zone!',
                    'results': {}
                }
                return Response(result, status=status.HTTP_200_OK)
        else:
            result = {
                'success': False,
                'messages': 'Failed to resolve the requested work zone!',
                'results': {}
            }
            return Response(result, status=status.HTTP_200_OK)
Example #4
    def list(self, request, *args, **kwargs):
        query_params = self.request.query_params
        not_page = query_params.get('not_page', False)
        work_zone_id = query_params.get('work_zone', False)
        api_logger.debug(f'work_zone_id: {work_zone_id}')
        analysis_status = query_params.get('status', False)
        project_id = query_params.get('project', False)
        try:
            work_zone = WorkZone.objects.get(pk=int(work_zone_id))
        except (WorkZone.DoesNotExist, TypeError, ValueError):
            work_zone = None
        queryset = self.filter_queryset(self.get_queryset())
        api_logger.debug(f'work_zone: {work_zone}')
        if work_zone:
            queryset = queryset.filter(work_zone=work_zone)
        if analysis_status:
            queryset = queryset.filter(status=analysis_status)
        if project_id:
            project = Project.objects.filter(pk=int(project_id)).first()
            if project:
                queryset = queryset.filter(project=project)
        queryset = queryset.distinct()
        if not_page and not_page.lower() != 'false':
            serializer = self.get_serializer(queryset, many=True)
            result = {
                'success': True,
                'messages': 'Fetched analysis tasks without pagination!',
                'results': serializer.data
            }
            return Response(result, status=status.HTTP_200_OK)
        else:
            page = self.paginate_queryset(queryset)
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                return self.get_paginated_response(serializer.data)

            serializer = self.get_serializer(queryset, many=True)
            result = {
                'success': True,
                'messages': 'Fetched analysis tasks without pagination!',
                'results': serializer.data
            }
            return Response(result, status=status.HTTP_200_OK)
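
For context, a hedged sketch of how a client might call this list endpoint; the URL, port, and token are hypothetical, not taken from the project's actual routing:

import requests

# Hypothetical endpoint and credentials; adjust to the project's real routing and auth.
resp = requests.get('http://localhost:8000/api/analysis/',
                    params={'work_zone': 3, 'status': 'running', 'not_page': 'true'},
                    headers={'Authorization': 'JWT <token>'})
print(resp.json())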
Example #5
def action_log(request,
               user,
               action_type,
               old_instance=None,
               instance=None,
               action_info="",
               object_changes=None):
    # For bulk operations, instantiate the log objects first, then use bulk_create
    # Write the log entry
    try:
        # Extract the client IP address from the request
        if request is not None:
            x_real_ip = request.META.get('HTTP_X_REAL_IP')
            if x_real_ip:
                remote_ip = x_real_ip.split(',')[0]
            else:
                http_remote_ip = request.META.get('HTTP_REMOTE_ADDR')
                if http_remote_ip:
                    remote_ip = http_remote_ip.split(',')[0]
                else:
                    remote_ip = request.META.get('REMOTE_ADDR')
        else:
            remote_ip = None
        # Log against instance when it exists, otherwise against old_instance
        content_object = instance if instance else old_instance
        if old_instance and instance:
            object_changes = diff_model_instance(old_instance, instance)
        elif action_type in [DELETE_ACTION_TYPE, BULK_DELETE_ACTION_TYPE]:
            # old_instance -> content_object
            object_changes = diff_model_instance(content_object, None)
        ActionLog.objects.create(content_object=content_object,
                                 user=user,
                                 action_type=action_type,
                                 action_info=action_info,
                                 object_changes=object_changes,
                                 remote_ip=remote_ip)
        api_logger.debug(
            f'Logged action: {remote_ip}-{content_object}-{user}-{action_type}-{action_info}-{object_changes}'
        )
    except Exception as error:
        api_logger.error("Error while writing action log, details: {}".format(error))
Example #6
def get_task_interface(uri_type='start_uri'):
    task_interface = AnalysisTaskInterface.objects.get_active().first()
    api_logger.debug(f'get_task_interface : {task_interface}')
    if task_interface:
        domain = task_interface.domain
        if not domain.endswith('/'):
            domain = f'{domain}/'
        api_logger.debug(f'get_task_interface domain: {task_interface.domain}')
        uri = getattr(task_interface, uri_type, '')
        api_logger.debug(f'get_task_interface uri: {uri}')
        url = urllib.parse.urljoin(domain, uri)
        api_logger.debug(f'get_task_interface url: {url}')
        success_code = task_interface.success_code
        error_code = task_interface.error_code
    else:
        domain = DELETE_URI['domain']
        uri = DELETE_URI[uri_type]
        url = urllib.parse.urljoin(domain, uri)
        success_code = DELETE_URI['success_code']
        error_code = DELETE_URI['error_code']
    if not url.endswith('/'):
        url = f'{url}/'
    api_logger.debug(f'get_task_interface final url: {url}')
    return url, success_code, error_code
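
`DELETE_URI` is a module-level fallback that is assumed but not shown here; a sketch of the shape this function expects, with illustrative values only:

# Assumed shape of the DELETE_URI fallback (illustrative values, not the real configuration).
DELETE_URI = {
    'domain': 'http://analysis-backend.example.com/',
    'start_uri': 'api/task/start/',
    'result_uri': 'api/task/result/',
    'success_code': '200',
    'error_code': '500',
}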
Example #7
def start_analysis(analysis):
    # Start an analysis task
    url, success_code, error_code = get_task_interface('start_uri')
    serial_number = analysis.serial_number
    if analysis.analysis_module:
        analysis_module_name = analysis.analysis_module.name
        analysis_module_version = analysis.analysis_module.version
        analysis_module_path = analysis.analysis_module.file_uri
    else:
        analysis_module_name = ''
        analysis_module_version = ''
        analysis_module_path = ''
    analysis_parameters = json.loads(analysis.analysis_parameter)
    main_command = analysis.command
    command = f'{main_command} {analysis_module_path}'
    for parameter in analysis_parameters:
        command = f'{command} {parameter["command_tag"]} {parameter["parameter_key"]} {parameter["parameter_value"]}'
    payload = {
        'serial_number': serial_number,
        'command': command,
        'main_command': main_command,
        'analysis_module_name': analysis_module_name,
        'analysis_module_version': analysis_module_version,
        'analysis_module_path': analysis_module_path,
        'analysis_parameters': analysis_parameters,
    }
    api_logger.debug(f'payload: {payload}')
    try:
        resp = requests.post(url,
                             data=json.dumps(payload),
                             headers=get_headers(),
                             timeout=60)
        results = json.loads(resp.text)
        api_logger.debug(f'results: {results}')
        if results.get('code', False):
            if str(results['code']) == success_code:
                api_logger.debug(f'Analysis task started successfully, response: {results}')
                return True, results['code'], results['messages'], results['results']
            if str(results['code']) == error_code:
                api_logger.warning(f'Failed to start analysis task, response: {results}')
                return False, results['code'], results['messages'], results['results']
            api_logger.warning(f'Starting analysis task returned an unknown response code, response: {results}')
            return False, 400, 'Starting analysis task returned an unknown response code', results
        else:
            api_logger.warning(f'No response code found when starting analysis task, response: {results}')
            return False, 400, 'No response code found when starting analysis task', results
    except Exception as request_error:
        error_messages = f'Error while requesting analysis task start: {request_error}'
        api_logger.error(error_messages)
        return False, 500, error_messages, {}
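
To make the command assembly above concrete, a small sketch with hypothetical module and parameter values, mirroring the stored `analysis_parameter` JSON:

# Illustrative inputs (assumption: this mirrors the shape of analysis.analysis_parameter).
main_command = 'python'
analysis_module_path = '/modules/rnaseq/run.py'
analysis_parameters = [
    {'command_tag': '--input', 'parameter_key': 'fastq', 'parameter_value': '/data/sample.fq'},
    {'command_tag': '--threads', 'parameter_key': 'n', 'parameter_value': '8'},
]
command = f'{main_command} {analysis_module_path}'
for parameter in analysis_parameters:
    command = f'{command} {parameter["command_tag"]} {parameter["parameter_key"]} {parameter["parameter_value"]}'
# -> 'python /modules/rnaseq/run.py --input fastq /data/sample.fq --threads n 8'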
Example #8
    def file_upload(self, request, *args, **kwargs):
        # File upload / large files / resumable upload
        # todo: concurrent requests from the front end can corrupt the saved state; concurrency handling still needs to be fixed
        if request.method == 'GET':
            query_params = self.request.query_params
            data_type = int(query_params.get('data_type', PROJECT_DATA))
            current_path = query_params.get('current_path', '')
            work_zone_id = query_params.get('work_zone', None)
            try:
                work_zone_id = int(work_zone_id)
                work_zone = WorkZone.objects.get(pk=work_zone_id)
            except (WorkZone.DoesNotExist, TypeError, ValueError):
                work_zone = None
            if not data_type:
                result = {'success': False, 'messages': 'Unable to determine the file type of this upload!'}
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
            chunk_index = query_params.get('chunk_index', None)  # index of the current chunk
            chunk_size = query_params.get('chunk_size', None)  # size of each chunk
            current_chunk_size = query_params.get('current_chunk_size', None)  # size of the current chunk
            total_size = int(query_params.get('total_size', 0))  # total file size
            file_md5 = query_params.get('file_md5', None)  # MD5
            file_name = query_params.get('file_name', None)  # file name
            relative_path = query_params.get('relative_path', None)  # relative path
            total_chunks = int(query_params.get('total_chunks', 0))  # total number of chunks
            if not file_md5:
                result = {'success': False, 'messages': 'Please provide the file MD5 value'}
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
            if not file_name:
                result = {'success': False, 'messages': 'Please provide the file name: file_name'}
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
            data_path = get_data_directory_path(data_type=data_type,
                                                current_path=current_path,
                                                work_zone_id=work_zone_id)
            file_path = os.path.join(data_path, file_name)
            save_directory = os.path.join(data_path, f'{file_md5}-part')
            # If a file with the same MD5 already exists at a different path, copy it to the target
            # path and report it as uploaded, i.e. instant upload.
            # The same file path + file MD5 means the exact same file:
            # - if the file exists, it has already been uploaded (instant upload);
            # - if the record exists but the file does not, the upload is incomplete: compare the
            #   file size and total chunk count, and if they do not match, the chunking changed,
            #   so delete the existing chunk files and upload from scratch;
            # - otherwise return the chunks already uploaded.
            saved_dataset = DataSet.objects.filter(file_md5=file_md5, uploaded=True).first()
            dataset = DataSet.objects.filter(data_type=data_type, work_zone=work_zone,
                                             directory_path=current_path, file_name=file_name, file_md5=file_md5).first()
            if saved_dataset and not dataset:
                api_logger.debug('A file with the same MD5 exists at a different path, copying it')
                create_or_get_directory(os.path.dirname(file_path))
                shutil.copyfile(saved_dataset.file_full_path, file_path)
                DataSet.objects.create(directory_path=current_path, file_name=file_name, data_type=data_type,
                                       file_type=FILE, file_size=total_size, uploaded=True, file_md5=file_md5,
                                       uploaded_chunks=saved_dataset.uploaded_chunks, total_chunks=total_chunks,
                                       work_zone=work_zone, creator=request.user)
                uploaded = True
                uploaded_chunks = saved_dataset.uploaded_chunks.split(',')
            else:
                if dataset:
                    if os.path.exists(file_path):
                        api_logger.debug(f'A file with the same path and MD5 already exists, uploaded_chunks: {dataset.uploaded_chunks}')
                        uploaded = True
                        uploaded_chunks = dataset.uploaded_chunks.split(',')
                    else:
                        if dataset.total_chunks != total_chunks or dataset.file_size != total_size:
                            api_logger.debug('Record exists but file is missing, and chunk count or size does not match')
                            uploaded = False
                            uploaded_chunks = []
                            if os.path.exists(file_path):
                                os.remove(file_path)
                            if os.path.exists(save_directory):
                                shutil.rmtree(save_directory)
                        else:
                            api_logger.debug(f'Record exists but file is missing, returning previously recorded chunks: {dataset.uploaded_chunks}')
                            uploaded = False
                            uploaded_chunks = dataset.uploaded_chunks.split(',')
                else:
                    api_logger.debug('No existing record')
                    uploaded = False
                    uploaded_chunks = []
                    if os.path.exists(file_path):
                        os.remove(file_path)
                    if os.path.exists(save_directory):
                        shutil.rmtree(save_directory)
            uploaded_chunks = [int(uploaded_chunk) for uploaded_chunk in uploaded_chunks if uploaded_chunk]
            # Make sure the last chunk is re-uploaded so the merge step is triggered
            if len(uploaded_chunks) >= 1:
                uploaded_chunks = uploaded_chunks[:-1]
            result = {'success': True, 'messages': f'Chunk status for file: {file_name}',
                      'results': {'file_path': file_path, 'file_name': file_name,
                                  'uploaded': uploaded,
                                  'uploaded_chunks': uploaded_chunks,
                                  }}
            return Response(result, status=status.HTTP_200_OK)
        else:
            data_type = int(request.data.get('data_type', 0))
            current_path = request.data.get('current_path', '')
            work_zone_id = request.data.get('work_zone', 0)
            try:
                work_zone_id = int(work_zone_id)
                work_zone = WorkZone.objects.get(pk=work_zone_id)
            except (WorkZone.DoesNotExist, TypeError, ValueError):
                work_zone = None
            if not data_type:
                result = {'success': False, 'messages': 'Unable to determine the file type of this upload!'}
                return Response(result, status=status.HTTP_400_BAD_REQUEST)
            chunk_index = request.data.get('chunk_index', None)  # index of the current chunk
            chunk_size = request.data.get('chunk_size', None)  # size of each chunk
            current_chunk_size = request.data.get('current_chunk_size', None)  # size of the current chunk
            total_size = int(request.data.get('total_size', 0))  # total file size
            file_md5 = request.data.get('file_md5', None)  # MD5
            file_name = request.data.get('file_name', None)  # file name
            relative_path = request.data.get('relative_path', None)  # relative path
            total_chunks = int(request.data.get('total_chunks', 0))  # total number of chunks
            chunk_file = request.data.get('file', None)  # the chunk payload itself

            dataset = DataSet.objects.filter(data_type=data_type, work_zone=work_zone,
                                             directory_path=current_path, file_name=file_name, file_md5=file_md5).first()
            if not dataset:
                dataset = DataSet.objects.create(data_type=data_type, file_type=10, work_zone=work_zone, file_md5=file_md5,
                                                 directory_path=current_path, file_name=file_name,
                                                 file_size=total_size, total_chunks=total_chunks,
                                                 creator=request.user)
            save_directory = os.path.join(dataset.directory_full_path, f'{file_md5}-part')
            save_path = os.path.join(save_directory, f'{file_name}.part{chunk_index}')
            # Save the chunk
            # default_storage will not overwrite an existing file, so delete any stale chunk first
            if default_storage.exists(save_path):
                default_storage.delete(save_path)
            default_storage.save(save_path, ContentFile(chunk_file.read()))
            uploaded_chunks = dataset.uploaded_chunks
            api_logger.debug(f'Current chunk: {chunk_index}')
            if uploaded_chunks:
                uploaded_chunks = set(uploaded_chunks.split(','))
                uploaded_chunks.add(str(chunk_index))
            else:
                uploaded_chunks = {str(chunk_index)}
            api_logger.debug(f'All chunks after saving: {uploaded_chunks}')
            dataset.uploaded_chunks = ','.join(uploaded_chunks)
            dataset.save()
            api_logger.debug(f'Recorded chunk count: {len(dataset.uploaded_chunks.split(","))}, expected: {total_chunks}')
            if len(dataset.uploaded_chunks.split(',')) == int(total_chunks):
                api_logger.debug(f'All chunks received, merging: {save_directory}/*.part*')
                uploaded = True
                with open(dataset.file_full_path, 'wb') as uploaded_file:
                    for index in range(int(total_chunks)):
                        part_path = os.path.join(save_directory, f'{file_name}.part{index + 1}')
                        api_logger.debug(f'Current part file: {part_path}')
                        try:
                            # Open each part in order and append its contents to the merged file
                            with open(part_path, 'rb') as part_file:
                                uploaded_file.write(part_file.read())
                        except Exception as error:
                            api_logger.error(f'Failed to merge {file_name} from {save_directory}: {error}')
                            uploaded = False
                # Verify the MD5 of the merged file
                uploaded_file_md5 = check_md5_sum(file_name=dataset.file_full_path)
                if uploaded_file_md5 != file_md5:
                    api_logger.debug(f'Merged file MD5 mismatch: {uploaded_file_md5} vs {file_md5}')
                    uploaded = False
                if uploaded:
                    dataset.uploaded = uploaded
                    dataset.save()
                    shutil.rmtree(save_directory)
                    result = {'success': True, 'messages': 'File uploaded and merged successfully!',
                              'results': {'uploaded': uploaded, 'total_chunks': total_chunks,
                                          'file_name': file_name, 'file_md5': file_md5}}
                    return Response(result, status=status.HTTP_200_OK)
                else:
                    result = {'success': False, 'messages': 'Failed to merge the file, please upload it again!'}
                    return Response(result, status=status.HTTP_400_BAD_REQUEST)
            else:
                result = {'success': True, 'messages': f'Chunk {chunk_index} uploaded successfully!',
                          'results': {'uploaded': False, 'total_chunks': total_chunks,
                                      'file_name': file_name, 'file_md5': file_md5}
                          }
                return Response(result, status=status.HTTP_200_OK)
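
`check_md5_sum` is referenced in the merge step above but not shown; a minimal sketch of such a helper, hashing the file in blocks so large files are not read into memory at once (the real helper may differ):

import hashlib

# Hypothetical sketch of check_md5_sum; the real helper may differ.
def check_md5_sum(file_name, block_size=64 * 1024):
    md5 = hashlib.md5()
    with open(file_name, 'rb') as checked_file:
        # Read fixed-size blocks until EOF and feed each into the digest.
        for block in iter(lambda: checked_file.read(block_size), b''):
            md5.update(block)
    return md5.hexdigest()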