完善lsfx mock服务上传状态接口与部署文档

This commit is contained in:
wkc
2026-03-13 16:38:07 +08:00
parent bda89202ba
commit 109b5220b2
29 changed files with 4489 additions and 67 deletions

View File

@@ -2,18 +2,78 @@ from fastapi import BackgroundTasks, UploadFile
from utils.response_builder import ResponseBuilder
from config.settings import settings
from typing import Dict, List, Union
from dataclasses import dataclass, field
import time
from datetime import datetime
from datetime import datetime, timedelta
import random
import uuid
@dataclass
class FileRecord:
    """File record model (extended version).

    In-memory mock of one uploaded bank-statement file and its parse state.
    Field defaults mirror the payloads the mock API returns; camelCase
    serialization is done by the FileService helpers.
    """
    # Original fields
    log_id: int
    group_id: int
    file_name: str
    status: int = -5  # -5 indicates the file parsed successfully
    upload_status_desc: str = "data.wait.confirm.newaccount"
    parsing: bool = True  # True means parsing is still in progress
    # New fields - account and enterprise (legal subject) information
    account_no_list: List[str] = field(default_factory=list)
    enterprise_name_list: List[str] = field(default_factory=list)
    # New fields - bank and template information
    bank_name: str = "ZJRCU"
    real_bank_name: str = "ZJRCU"
    template_name: str = "ZJRCU_T251114"
    data_type_info: List[str] = field(default_factory=lambda: ["CSV", ","])
    # New fields - file metadata
    file_size: int = 50000
    download_file_name: str = ""
    # 32-char hex id: a UUID4 with the dashes stripped
    file_package_id: str = field(default_factory=lambda: str(uuid.uuid4()).replace('-', ''))
    # New fields - uploading-user information
    file_upload_by: int = 448
    file_upload_by_user_name: str = "admin@support.com"
    file_upload_time: str = field(default_factory=lambda: datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    # New fields - legal-entity information
    le_id: int = 10000
    login_le_id: int = 10000
    log_type: str = "bankstatement"
    log_meta: str = "{\"lostHeader\":[],\"balanceAmount\":\"-1\"}"
    lost_header: List[str] = field(default_factory=list)
    # New fields - record statistics
    rows: int = 0
    source: str = "http"
    total_records: int = 150
    is_split: int = 0
    # New fields - transaction date range as YYYYMMDD integers
    trx_date_start_id: int = 20240101
    trx_date_end_id: int = 20241231
class FileService:
    """File upload and parsing mock service."""

    def __init__(self):
        # logId -> FileRecord; the single in-memory store for all uploads.
        # The old separate parsing_status dict is gone: parse state now
        # lives on FileRecord.parsing.
        self.file_records: Dict[int, FileRecord] = {}
        # Monotonic counter used to hand out new logIds
        self.log_counter = settings.INITIAL_LOG_ID
def _infer_bank_name(self, filename: str) -> tuple:
"""根据文件名推断银行名称和模板名称"""
if "支付宝" in filename or "alipay" in filename.lower():
return "ALIPAY", "ALIPAY_T220708"
elif "绍兴银行" in filename or "BSX" in filename:
return "BSX", "BSX_T240925"
else:
return "ZJRCU", "ZJRCU_T251114"
async def upload_file(
    self, group_id: int, file: UploadFile, background_tasks: BackgroundTasks
) -> Dict:
    """Register an uploaded file and schedule its mock parsing.

    The diff residue of the previous implementation (dict-based record,
    separate parsing_status flag, ResponseBuilder response) is removed;
    this is the coherent FileRecord-based flow.

    Args:
        group_id: Project id the file belongs to.
        file: Uploaded file; only its filename is inspected.
        background_tasks: FastAPI task queue used to run the delayed parse.

    Returns:
        The full upload response payload built from the new record.
    """
    # Allocate the next logId
    self.log_counter += 1
    log_id = self.log_counter
    # Infer bank/template from the filename
    bank_name, template_name = self._infer_bank_name(file.filename)
    # Generate a plausible transaction date range (90-365 days back)
    end_date = datetime.now()
    start_date = end_date - timedelta(days=random.randint(90, 365))
    trx_date_start_id = int(start_date.strftime("%Y%m%d"))
    trx_date_end_id = int(end_date.strftime("%Y%m%d"))
    # Random account number and a sometimes-empty enterprise name
    account_no = f"{random.randint(10000000000, 99999999999)}"
    enterprise_names = ["测试主体"] if random.random() > 0.3 else [""]
    # Create the full file record (initial state: still parsing)
    file_record = FileRecord(
        log_id=log_id,
        group_id=group_id,
        file_name=file.filename,
        download_file_name=file.filename,
        bank_name=bank_name,
        real_bank_name=bank_name,
        template_name=template_name,
        account_no_list=[account_no],
        enterprise_name_list=enterprise_names,
        le_id=10000 + random.randint(0, 9999),
        login_le_id=10000 + random.randint(0, 9999),
        file_size=random.randint(10000, 100000),
        total_records=random.randint(100, 300),
        trx_date_start_id=trx_date_start_id,
        trx_date_end_id=trx_date_end_id,
        parsing=True,
        status=-5
    )
    # Store the record
    self.file_records[log_id] = file_record
    # Background task flips record.parsing -> False after a delay
    background_tasks.add_task(self._delayed_parse, log_id)
    return self._build_upload_response(file_record)
def _build_upload_response(self, file_record: FileRecord) -> dict:
"""构建上传接口的完整响应"""
return {
"code": "200",
"data": {
"accountsOfLog": {
str(file_record.log_id): [
{
"bank": file_record.bank_name,
"accountName": file_record.enterprise_name_list[0] if file_record.enterprise_name_list else "",
"accountNo": file_record.account_no_list[0] if file_record.account_no_list else "",
"currency": "CNY"
}
]
},
"uploadLogList": [
{
"accountNoList": file_record.account_no_list,
"bankName": file_record.bank_name,
"dataTypeInfo": file_record.data_type_info,
"downloadFileName": file_record.download_file_name,
"enterpriseNameList": file_record.enterprise_name_list,
"filePackageId": file_record.file_package_id,
"fileSize": file_record.file_size,
"fileUploadBy": file_record.file_upload_by,
"fileUploadByUserName": file_record.file_upload_by_user_name,
"fileUploadTime": file_record.file_upload_time,
"leId": file_record.le_id,
"logId": file_record.log_id,
"logMeta": file_record.log_meta,
"logType": file_record.log_type,
"loginLeId": file_record.login_le_id,
"lostHeader": file_record.lost_header,
"realBankName": file_record.real_bank_name,
"rows": file_record.rows,
"source": file_record.source,
"status": file_record.status,
"templateName": file_record.template_name,
"totalRecords": file_record.total_records,
"trxDateEndId": file_record.trx_date_end_id,
"trxDateStartId": file_record.trx_date_start_id,
"uploadFileName": file_record.file_name,
"uploadStatusDesc": file_record.upload_status_desc
}
],
"uploadStatus": 1
},
"status": "200",
"successResponse": True
}
def _delayed_parse(self, log_id: int):
    """Background task: simulate the file-parsing delay.

    Sleeps for the configured delay, then flips the record's ``parsing``
    flag to False to signal that parsing has finished.  Replaces the
    removed ``_simulate_parsing`` variant that tracked a separate
    parsing_status dict.

    Args:
        log_id: Id of the upload whose record should be marked parsed.
    """
    time.sleep(settings.PARSE_DELAY_SECONDS)
    # Record may have been deleted while we slept; ignore in that case
    if log_id in self.file_records:
        self.file_records[log_id].parsing = False
def _generate_deterministic_record(self, log_id: int, group_id: int) -> dict:
"""
基于 logId 生成确定性的文件记录
Args:
log_id: 文件ID用作随机种子
group_id: 项目ID
Returns:
文件记录字典26个字段
"""
# 银行类型选项
bank_options = [
("ALIPAY", "ALIPAY_T220708"),
("BSX", "BSX_T240925"),
("ZJRCU", "ZJRCU_T251114")
]
bank_name, template_name = random.choice(bank_options)
# 生成交易日期范围
end_date = datetime.now()
start_date = end_date - timedelta(days=random.randint(90, 365))
# 生成账号和主体
account_no = f"{random.randint(10000000000, 99999999999)}"
enterprise_names = ["测试主体"] if random.random() > 0.3 else [""]
return {
"accountNoList": [account_no],
"bankName": bank_name,
"dataTypeInfo": ["CSV", ","],
"downloadFileName": f"测试文件_{log_id}.csv",
"enterpriseNameList": enterprise_names,
"fileSize": random.randint(10000, 100000),
"fileUploadBy": 448,
"fileUploadByUserName": "admin@support.com",
"fileUploadTime": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"isSplit": 0,
"leId": 10000 + random.randint(0, 9999),
"logId": log_id,
"logMeta": "{\"lostHeader\":[],\"balanceAmount\":\"-1\"}",
"logType": "bankstatement",
"loginLeId": 10000 + random.randint(0, 9999),
"lostHeader": [],
"realBankName": bank_name,
"rows": 0,
"source": "http",
"status": -5,
"templateName": template_name,
"totalRecords": random.randint(100, 300),
"trxDateEndId": int(end_date.strftime("%Y%m%d")),
"trxDateStartId": int(start_date.strftime("%Y%m%d")),
"uploadFileName": f"测试文件_{log_id}.pdf",
"uploadStatusDesc": "data.wait.confirm.newaccount"
}
def _build_log_detail(self, record: FileRecord) -> dict:
"""构建日志详情对象"""
return {
"accountNoList": record.account_no_list,
"bankName": record.bank_name,
"dataTypeInfo": record.data_type_info,
"downloadFileName": record.download_file_name,
"enterpriseNameList": record.enterprise_name_list,
"fileSize": record.file_size,
"fileUploadBy": record.file_upload_by,
"fileUploadByUserName": record.file_upload_by_user_name,
"fileUploadTime": record.file_upload_time,
"isSplit": record.is_split,
"leId": record.le_id,
"logId": record.log_id,
"logMeta": record.log_meta,
"logType": record.log_type,
"loginLeId": record.login_le_id,
"lostHeader": record.lost_header,
"realBankName": record.real_bank_name,
"rows": record.rows,
"source": record.source,
"status": record.status,
"templateName": record.template_name,
"totalRecords": record.total_records,
"trxDateEndId": record.trx_date_end_id,
"trxDateStartId": record.trx_date_start_id,
"uploadFileName": record.file_name,
"uploadStatusDesc": record.upload_status_desc
}
def check_parse_status(self, group_id: int, inprogress_list: str) -> Dict:
"""检查文件解析状态
@@ -90,23 +298,59 @@ class FileService:
# 解析logId列表
log_ids = [int(x.strip()) for x in inprogress_list.split(",") if x.strip()]
# 检查是否还在解析中
is_parsing = any(
self.parsing_status.get(log_id, False) for log_id in log_ids
)
pending_list = []
all_parsing_complete = True
# 获取待处理列表
pending_list = [
self.file_records[log_id]
for log_id in log_ids
if log_id in self.file_records
]
for log_id in log_ids:
if log_id in self.file_records:
record = self.file_records[log_id]
if record.parsing:
all_parsing_complete = False
pending_list.append(self._build_log_detail(record))
return {
"code": "200",
"data": {"parsing": is_parsing, "pendingList": pending_list},
"data": {
"parsing": not all_parsing_complete,
"pendingList": pending_list
},
"status": "200",
"successResponse": True,
"successResponse": True
}
def get_upload_status(self, group_id: int, log_id: int = None) -> dict:
"""
获取文件上传状态(基于 logId 生成确定性数据)
Args:
group_id: 项目ID
log_id: 文件ID可选
Returns:
上传状态响应字典
"""
logs = []
if log_id:
# 使用 logId 作为随机种子,确保相同 logId 返回相同数据
random.seed(log_id)
# 生成确定性的文件记录
record = self._generate_deterministic_record(log_id, group_id)
logs.append(record)
# 返回响应
return {
"code": "200",
"data": {
"logs": logs,
"status": "",
"accountId": 8954,
"currency": "CNY"
},
"status": "200",
"successResponse": True
}
def delete_files(self, group_id: int, log_ids: List[int], user_id: int) -> Dict:
@@ -121,30 +365,38 @@ class FileService:
删除响应字典
"""
# 删除文件记录
deleted_count = 0
for log_id in log_ids:
self.file_records.pop(log_id, None)
self.parsing_status.pop(log_id, None)
if log_id in self.file_records:
del self.file_records[log_id]
deleted_count += 1
return {
"code": "200",
"data": {"message": "delete.files.success"},
"code": "200 OK", # 注意:这里是 "200 OK" 不是 "200"
"data": {
"message": "delete.files.success"
},
"message": "delete.files.success",
"status": "200",
"successResponse": True,
"successResponse": True
}
def fetch_inner_flow(self, request: Union[Dict, object]) -> Dict:
"""拉取行内流水(模拟无数据场景
"""拉取行内流水(返回随机logId
Args:
request: 拉取流水请求(可以是字典或对象
request: 拉取流水请求(保留参数以符合接口规范当前Mock实现不使用
Returns:
流水响应字典
流水响应字典包含随机生成的logId数组
"""
# 模拟无行内流水文件场景
# 随机生成一个logId范围10000-99999
log_id = random.randint(10000, 99999)
# 返回成功的响应包含logId数组
return {
"code": "200",
"data": {"code": "501014", "message": "无行内流水文件"},
"data": [log_id],
"status": "200",
"successResponse": True,
}