def all(cls, *args):
    try:
        dem_args = list(args)
        if some(dem_args, lambda result: isinstance(result, Error)):
            return head(
                filter_(dem_args, lambda result: isinstance(result, Error)))
        if every(dem_args, lambda result: not isinstance(result, Ok)):
            return Error(
                Exception('Some items passed in were not a Result.'))
        return Ok(map_(dem_args, lambda result: result.getValue()))
    except Exception as e:
        return Error(e)
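# A minimal sketch of the pydash helpers used by the combinator above (some, every,
# head, filter_, map_); the Ok/Error result types are project-specific, so only the
# pydash behaviour is illustrated here.
import pydash

results = [1, 2, 3]
pydash.some(results, lambda r: r > 2)                    # True: at least one item matches
pydash.every(results, lambda r: r > 0)                   # True: every item matches
pydash.head(pydash.filter_(results, lambda r: r > 1))    # 2: first matching item
pydash.map_(results, lambda r: r * 10)                   # [10, 20, 30]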
def store_test_result(self, test_result, build_details):
    test_case_id = self.get_test_case_id(test_result)
    if not test_case_id:
        return
    client = CLIENT[BUCKET]
    try:
        document = client.get(test_case_id).value
        os = build_details['os']
        if os not in document['os']:
            document['os'][os] = []
        if not document['component']:
            document['component'] = build_details['component']
        if not document['subComponent'] and 'subComponent' in build_details:
            document['subComponent'] = build_details['subComponent']
        tests = document['os'][os]
        """ Check if already updated, return if true """
        build = build_details['build']
        build_id = build_details['build_id']
        already_updated = pydash.some(
            tests,
            lambda test: test['build'] == build and test['build_id'] == build_id)
        if already_updated:
            return
        test = {}
        test['build_id'] = build_id
        test['build'] = build
        test['result'] = test_result['status']
        test['duration'] = test_result['duration']
        test['errorStackTrace'] = test_result['errorStackTrace']
        test['url'] = build_details['url']
        """ Trim tests to store only TESTS_RESULT_LIMIT tests results """
        if len(tests) > TESTS_RESULT_LIMIT - 1:
            tests = tests[len(tests) - TESTS_RESULT_LIMIT + 1:]
        tests.append(test)
        # Write the (possibly trimmed) list back so the upsert persists the new result.
        document['os'][os] = tests
        client.upsert(test_case_id, document)
    except Exception as e:
        print(e)
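# A small illustration of the two list operations above, using plain data instead of
# the Couchbase-style document; TESTS_RESULT_LIMIT is set here just for the example.
import pydash

TESTS_RESULT_LIMIT = 3
tests = [{'build': '7.0.0', 'build_id': 1}, {'build': '7.0.0', 'build_id': 2},
         {'build': '7.0.1', 'build_id': 3}]

# Duplicate check: True, so the function would return without storing again.
pydash.some(tests, lambda t: t['build'] == '7.0.1' and t['build_id'] == 3)

# Trimming: keep only the most recent TESTS_RESULT_LIMIT - 1 entries before appending.
if len(tests) > TESTS_RESULT_LIMIT - 1:
    tests = tests[len(tests) - TESTS_RESULT_LIMIT + 1:]
tests.append({'build': '7.0.2', 'build_id': 4})
# tests now holds the last TESTS_RESULT_LIMIT results.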
def file_upload_batch():
    try:
        fileHandler = FileHandler()
        # {
        #     files: [{binary1}, {binary2}]
        #     files_info: {sub_dir: '', file_key: '', curr_chunk: 1, total_chunks: 10, md5: 'abcdefghijklmn'}
        #       | [
        #           {file_id: 1, sub_dir: '', file_key: '', curr_chunk: 1, total_chunks: 10, md5: 'abcdefghijklmn'},
        #           {file_id: 2, sub_dir: '', file_key: '', curr_chunk: 2, total_chunks: 10, md5: 'abcdefghijklmn'}
        #         ]
        # }
        # List of files or chunks; may mix small complete files with partial chunks.
        files = []
        for f in request.files:
            files.append(request.files[f])

        # File metadata
        orignal_files_info = request.form.get('files_info')
        orignal_files_info = json.loads(orignal_files_info) if isinstance(
            orignal_files_info, str) else orignal_files_info
        files_info = orignal_files_info
        if not files:
            return standard_expection('File list is empty.')
        # Supports uploading a batch as well as a single file.
        if not isinstance(files_info, (list, dict)):
            return standard_expection('Unable to parse file metadata.')
        if isinstance(files_info, list) and len(files_info) != len(files):
            return standard_expection('File metadata does not match the uploaded files.')
        if isinstance(files_info, dict) and len(files) != 1:
            return standard_expection('File metadata does not match the uploaded files.')

        tenant_id = g.tenant_id if hasattr(g, 'tenant_id') else 0
        user_id = g.user_id if hasattr(g, 'user_id') else 0
        cache_expired_time = current_app.config['REDIS_CACHE_EXPIRE_FILE']
        files_info = [files_info] if isinstance(files_info, dict) else files_info
        if _.some(
                files_info,
                lambda x: x.get('total_chunks') is None or
                (x.get('curr_chunk') is not None and not _.is_integer(x.get('curr_chunk')))):
            return standard_expection('Malformed file metadata in files_info.')

        file_groups = _.group_by(files_info, lambda x: x.get('file_key'))
        files_set = {}
        for f in files:
            files_set[f.name] = f
        msg_list = []
        md5_error_list = []
        from app import aios_redis
        index = 0
        for file_key, file_partations_info in file_groups.items():
            # file_key: unique identifier of a file; all chunks of one file share the same file_key.
            # file_partations_info: a group of chunks (possibly only part of a large file)
            # or the metadata of a single complete file.
            for file_partation in file_partations_info:
                id = file_partation.get('id')
                # Original file name
                file_name = file_partation.get('file_name')
                # Default directory convention: <module>/<tenant>/<custom dir>/<original file name>
                tenant_id = tenant_id
                sub_dir = file_partation.get(
                    'sub_dir', os.path.join('cs', str(tenant_id), file_key))
                # curr_chunk starts at 1
                curr_chunk = file_partation.get('curr_chunk')
                # Total number of chunks for this file
                total_chunks = file_partation.get('total_chunks')
                # md5 value; if present, used for an integrity check
                md5 = file_partation.get('md5')
                # File object
                file = files_set[id]
                # file = files[index]
                index += 1
                absolute_dir_path = fileHandler.get_standard_sub_dir(sub_dir)
                absolute_file_path = os.path.join(absolute_dir_path, f'{file_key}.{curr_chunk}')
                # Guard against repeated re-uploads of the same chunk
                # is_valid = True
                # for f in os.listdir(absolute_dir_path):
                #     # The file name may contain "-"
                #     if '-' in f.split('.')[1]:
                #         [(start, end)] = re.findall('.*\.(\d+)-(\d+)$', f)
                #         if int(start) <= int(curr_chunk) <= int(end):
                #             is_valid = False
                #             print('=====chunk already exists', f, f'{file_key}.{curr_chunk}')
                #             if os.path.exists(absolute_file_path):
                #                 os.rename(absolute_file_path, absolute_file_path + '.deleted')
                #             continue
                # if is_valid is False:
                #     continue
                if os.path.exists(absolute_file_path):
                    os.remove(absolute_file_path)
                file.save(absolute_file_path)
                # File md5 verification
                # if md5:
                #     is_valid, msg = fileHandler.valid_md5(absolute_file_path, md5)
                #     if not is_valid:
                #         md5_error_list.append({'file_key': file_key, 'curr_chunk': curr_chunk})
                #         continue
                aios_redis.set(f'plus_uploader:{file_key}:{curr_chunk}', 'done', cache_expired_time)
                fileHandler.log_print(file_key, curr_chunk, f'{curr_chunk}/{total_chunks}')
                # Publish a message so the background worker can try to merge the file.
                msg = {
                    'file_key': file_key,
                    'dir_path': absolute_dir_path,
                    'curr_chunk': curr_chunk,
                    'total_chunks': total_chunks,
                    'file_name': file_name,
                    'tenant_id': tenant_id,
                    'user_id': user_id,
                    'cache_expired_time': cache_expired_time
                }
                msg_list.append(msg)

        if len(md5_error_list):
            print('File MD5 verification failed')
            return standard_expection(json.dumps(md5_error_list))
        succ_list, err_list = fileHandler.multi_process_handler(msg_list)
        if len(err_list):
            print('File merge failed')
            return standard_expection(json.dumps(err_list))

        partations_info = []
        # Directory inside the container
        container_dir = os.path.join(os.getenv('FLASK_CONFIG'), sub_dir.strip(os.path.sep)).replace(
            os.path.sep, '/')
        # Collect each chunk's completion status and storage directory.
        for succ in succ_list:
            partations_info.append({
                'file_key': succ['file_key'],
                'curr_chunk': succ['curr_chunk'],
                'status': True,
                'host': container_dir,
                'msg': 'ok'
            })
        print('<success>', orignal_files_info)
        return standard_response(partations_info, 200)
    except Exception as err:
        import traceback
        traceback.print_exc()
        print('<failure>')
        return standard_expection(str(err))
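# A hedged client-side sketch of the request shape documented in the comment block at
# the top of file_upload_batch(): two chunks of one file plus the 'files_info' form
# field. The endpoint URL and the file names are assumptions, not taken from the source.
import json
import requests

files_info = [
    {'id': 'chunk1', 'file_name': 'video.mp4', 'file_key': 'abc123',
     'curr_chunk': 1, 'total_chunks': 2, 'md5': 'd41d8cd98f00b204e9800998ecf8427e'},
    {'id': 'chunk2', 'file_name': 'video.mp4', 'file_key': 'abc123',
     'curr_chunk': 2, 'total_chunks': 2, 'md5': 'd41d8cd98f00b204e9800998ecf8427e'},
]
files = {
    'chunk1': open('video.part1', 'rb'),   # form field name must match the 'id' in files_info
    'chunk2': open('video.part2', 'rb'),
}
resp = requests.post('http://localhost:5000/file/upload_batch',   # hypothetical URL
                     files=files,
                     data={'files_info': json.dumps(files_info)})
print(resp.json())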
def has_blank(records, fields):
    return pydash.some(values_for_records(fields, records), lambda x: x is None)
def iteratee(item):
    return pyd.some(self.funcs, lambda func: func(item))
def __call__(self, obj):
    """Return result of disjoin `obj` with :attr:`funcs` predicates."""
    def iteratee(item):
        return pyd.some(self.funcs, lambda func: func(item))

    return pyd.some(obj, iteratee)
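# A self-contained sketch of how the __call__ above behaves: the object passes when at
# least one of its elements satisfies at least one of the stored predicate functions.
# The Disjoin class name and predicates here are illustrative; only the pyd.some calls
# are taken from the snippet.
import pydash as pyd

class Disjoin(object):
    def __init__(self, *funcs):
        self.funcs = funcs

    def __call__(self, obj):
        def iteratee(item):
            return pyd.some(self.funcs, lambda func: func(item))
        return pyd.some(obj, iteratee)

disjoiner = Disjoin(lambda x: isinstance(x, int), lambda x: isinstance(x, float))
disjoiner([1, '2', '3'])    # True: 1 satisfies the int predicate
disjoiner(['1', '2', '3'])  # False: no element satisfies either predicate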
def test_some(case, expected):
    assert _.some(*case) == expected
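# A hedged sketch of how such a parametrized test is typically driven with pytest; the
# case list is illustrative, not taken from the original test module. Each case is the
# argument tuple unpacked into _.some().
import pydash as _
import pytest

@pytest.mark.parametrize('case,expected', [
    (([0, 1, None],), True),                  # truthy element present
    (([0, '', None],), False),                # no truthy element
    (([1, 2, 3], lambda x: x > 2), True),     # predicate matches at least one item
    (([1, 2, 3], lambda x: x > 3), False),    # predicate matches nothing
])
def test_some(case, expected):
    assert _.some(*case) == expected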
def __call__(self, obj):
    """Return result of disjoin `obj` with :attr:`funcs` predicates."""
    return pyd.some(obj, lambda item: pyd.some(self.funcs, lambda func: func(item)))
def callback(item):
    return pyd.some(self.funcs, lambda func: func(item))
def some_blank(self):
    return pydash.some(self.values, lambda x: x is None)
def some_values_blank(self):
    return pydash.some(self.factored_records, lambda data_record: data_record.some_blank())
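# A hedged illustration of the blank-detection pattern used by has_blank, some_blank
# and some_values_blank above, with plain lists standing in for the project's record
# and factored-record objects.
import pydash

values = ['a', None, 'c']
pydash.some(values, lambda x: x is None)                              # True: one blank value

records = [['a', 'b'], ['c', None]]
pydash.some(records, lambda r: pydash.some(r, lambda x: x is None))   # True: some record has a blank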
def is_cached(self, fn_name, opts):
    fn_cache_details = self._get_fn_cache_details(fn_name)
    return _.some(fn_cache_details, lambda val, key: val == opts)
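# A small illustration of the dict-aware callback used above: pydash inspects the
# callback's arity and passes (value, key) when iterating a mapping. The cache
# structure shown here is made up for the example.
import pydash as _

fn_cache_details = {
    'ts_1600000000': {'ttl': 60},
    'ts_1600000060': {'ttl': 120},
}
_.some(fn_cache_details, lambda val, key: val == {'ttl': 120})   # True
_.some(fn_cache_details, lambda val, key: val == {'ttl': 300})   # False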