|
@@ -0,0 +1,417 @@
|
|
|
|
|
+"""
|
|
|
|
|
+QA模式问答对解析器工作流
|
|
|
|
|
+
|
|
|
|
|
+使用LangGraph实现完整的PDF到QA知识库工作流:
|
|
|
|
|
+PDF OCR解析 → 文本分块 → 生成QA对 → 上传RAGFlow
|
|
|
|
|
+"""
|
|
|
|
|
+
|
|
|
|
|
+import os
|
|
|
|
|
+import json
|
|
|
|
|
+import csv
|
|
|
|
|
+import tempfile
|
|
|
|
|
+import concurrent.futures
|
|
|
|
|
+from concurrent.futures import ThreadPoolExecutor
|
|
|
|
|
+from typing import List, Dict, Any, Optional
|
|
|
|
|
+from pydantic import BaseModel, Field, ConfigDict
|
|
|
|
|
+from langgraph.graph import StateGraph, START, END
|
|
|
|
|
+from langchain.chat_models import init_chat_model
|
|
|
|
|
+from langchain_core.messages import HumanMessage, SystemMessage
|
|
|
|
|
+from langchain_text_splitters import RecursiveCharacterTextSplitter
|
|
|
|
|
+from src.datasets.parser.pdf_parser.pdf_splitter import PDFSplitter
|
|
|
|
|
+from src.model.qwen_vl import QWenVLParser
|
|
|
|
|
+from src.utils.ragflow.ragflow_service import RAGFlowService
|
|
|
|
|
+from src.conf.settings import model_settings
|
|
|
|
|
+from langfuse.langchain import CallbackHandler
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
class QAParsingState(BaseModel):
    """State carried through the QA parsing workflow graph.

    Fields fall into three groups: caller-supplied inputs, intermediate
    artifacts produced by pipeline nodes, and final outputs. Nodes report
    failures by setting ``error_message``; downstream nodes check it and
    short-circuit, so the graph always reaches the terminal node.
    """
    # Allow non-pydantic types to pass through the state unchanged.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # --- Input parameters (supplied by the caller via run()) ---
    pdf_path: str = Field(..., description="PDF文件路径")
    dataset_id: str = Field(..., description="RAGFlow数据集ID")
    qa_count_per_chunk: int = Field(default=50, description="每块生成的QA数量")
    chunk_size: int = Field(default=1000, description="文本分块大小")
    chunk_overlap: int = Field(default=200, description="分块重叠大小")

    # --- Intermediate state (filled in by pipeline nodes) ---
    extracted_text: str = Field(default="", description="OCR提取的文本")
    chunks: List[str] = Field(default_factory=list, description="分块后的文本列表")
    qa_pairs: List[Dict[str, Any]] = Field(default_factory=list, description="生成的QA对列表")
    csv_path: str = Field(default="", description="临时CSV文件路径")

    # --- Output state ---
    uploaded_document_id: str = Field(default="", description="上传后的文档ID")
    qa_count: int = Field(default=0, description="生成的QA对数量")
    is_complete: bool = Field(default=False, description="是否处理完成")
    error_message: Optional[str] = Field(default=None, description="错误信息")
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
class QuestionAnswerParserWorkflow:
    """QA question-answer pair parsing workflow.

    Drives the full pipeline with LangGraph:
    PDF OCR parsing -> text chunking -> QA pair generation ->
    CSV export -> upload to RAGFlow -> trigger RAGFlow parsing.

    Error-handling contract: each node reports failure by returning an
    ``error_message`` update; subsequent nodes short-circuit when it is
    set, so the graph always flows through to the ``complete`` node
    instead of raising out of the graph.
    """

    def __init__(self, model_name: str = None):
        """
        Initialize the QA parsing workflow.

        Args:
            model_name: VL model name used for PDF OCR parsing. Falls back
                to ``model_settings.model_name`` when not provided.
        """
        self.model_name = model_name or model_settings.model_name
        self.ragflow_service = RAGFlowService()
        self.langfuse_handler = CallbackHandler()

        # LangChain chat model used for QA pair generation.
        self.chat_model = init_chat_model(
            model_provider=model_settings.model_provider,
            model=model_settings.chat_model_name,
            api_key=model_settings.api_key,
            base_url=model_settings.base_url,
            temperature=0.7
        )

        self.workflow = self._build_workflow()

    def _build_workflow(self):
        """Build and compile the strictly linear LangGraph workflow."""
        graph = StateGraph(QAParsingState)

        # One node per pipeline stage.
        graph.add_node("parse_pdf", self._parse_pdf_node)
        graph.add_node("split_text", self._split_text_node)
        graph.add_node("generate_qa", self._generate_qa_node)
        graph.add_node("export_csv", self._export_csv_node)
        graph.add_node("upload_document", self._upload_document_node)
        graph.add_node("parse_document", self._parse_document_node)
        graph.add_node("complete", self._complete_node)

        # Linear edges: each stage feeds the next; errors propagate via state.
        graph.add_edge(START, "parse_pdf")
        graph.add_edge("parse_pdf", "split_text")
        graph.add_edge("split_text", "generate_qa")
        graph.add_edge("generate_qa", "export_csv")
        graph.add_edge("export_csv", "upload_document")
        graph.add_edge("upload_document", "parse_document")
        graph.add_edge("parse_document", "complete")
        graph.add_edge("complete", END)

        return graph.compile()

    def _parse_pdf_node(self, state: QAParsingState) -> Dict[str, Any]:
        """PDF OCR node: extract text from every page with the VL model.

        Returns an ``extracted_text`` update on success, or an
        ``error_message`` update on failure.
        """
        print(f"开始解析PDF: {state.pdf_path}")

        try:
            # Split the PDF into page images.
            splitter = PDFSplitter()
            pages = splitter.split_pdf(state.pdf_path)

            # Run OCR on each page with the QWen-VL model.
            extracted_texts = []
            parser = QWenVLParser(self.model_name)

            for page in pages:
                page_number = page["page_number"]
                image = page["image"]

                # OCR prompt: raw text only, keep paragraph structure.
                prompt = """请提取图片中的所有文字内容,保持原有的段落结构。
只输出提取的文字,不要添加任何额外的说明或格式。"""

                result = parser.parse_image(image, page_number, prompt)
                text = result.get("content", "")
                extracted_texts.append(text)
                print(f"第 {page_number} 页文本提取完成")

            full_text = "\n\n".join(extracted_texts)
            print(f"PDF解析完成,提取文本长度: {len(full_text)} 字符")

            return {"extracted_text": full_text}

        except Exception as e:
            print(f"PDF解析失败: {str(e)}")
            return {"error_message": f"PDF解析失败: {str(e)}"}

    def _split_text_node(self, state: QAParsingState) -> Dict[str, Any]:
        """Text chunking node: split the OCR text into overlapping chunks."""
        print("开始文本分块...")

        # Short-circuit if an earlier node already failed.
        if state.error_message:
            return {}

        if not state.extracted_text:
            return {"error_message": "没有提取到文本内容"}

        try:
            # Separators ordered from strongest boundary (paragraph) down to
            # single characters; includes CJK sentence punctuation.
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=state.chunk_size,
                chunk_overlap=state.chunk_overlap,
                length_function=len,
                separators=["\n\n", "\n", "。", "!", "?", ";", " ", ""]
            )

            chunks = text_splitter.split_text(state.extracted_text)
            print(f"文本分块完成,共 {len(chunks)} 个分块")

            return {"chunks": chunks}

        except Exception as e:
            print(f"文本分块失败: {str(e)}")
            return {"error_message": f"文本分块失败: {str(e)}"}

    def _generate_qa_for_chunk(self, chunk: str, count: int, chunk_index: int) -> List[Dict[str, str]]:
        """Generate QA pairs for a single text chunk.

        Args:
            chunk: Text content to generate questions from.
            count: Requested number of QA pairs.
            chunk_index: Zero-based chunk position (for logging only).

        Returns:
            A list of ``{"question": ..., "answer": ...}`` dicts; empty on
            any generation or parsing failure (best-effort per chunk).
        """
        print(f"正在为分块 {chunk_index + 1} 生成 {count} 个QA对...")

        system_prompt = """你是一个专业的问答对生成专家。你的任务是根据给定的文本内容,生成高质量的问答对。

要求:
1. 问题应该覆盖内容的不同方面和细节
2. 答案应该准确、简洁,直接来自文本
3. 问答对应该有助于知识检索和理解
4. 避免生成过于简单或重复的问题
5. 确保问题是自包含的,不需要额外上下文即可理解"""

        user_prompt = f"""请根据以下内容,生成 {count} 个高质量的问答对。

内容:
{chunk}

请严格以JSON格式输出,格式如下:
[
{{"question": "问题1", "answer": "答案1"}},
{{"question": "问题2", "answer": "答案2"}}
]

只输出JSON数组,不要添加任何其他内容。"""

        try:
            messages = [
                SystemMessage(content=system_prompt),
                HumanMessage(content=user_prompt)
            ]

            response = self.chat_model.invoke(messages)
            content = response.content if hasattr(response, 'content') else str(response)

            # Strip any markdown code-fence wrapper the model may emit
            # before parsing the JSON payload.
            content = content.strip()
            if content.startswith("```json"):
                content = content[7:]
            if content.startswith("```"):
                content = content[3:]
            if content.endswith("```"):
                content = content[:-3]
            content = content.strip()

            qa_list = json.loads(content)
            # Keep only well-formed entries so downstream CSV export never
            # hits a KeyError on a missing question/answer key.
            qa_list = [
                qa for qa in qa_list
                if isinstance(qa, dict) and "question" in qa and "answer" in qa
            ]
            print(f"分块 {chunk_index + 1} 生成了 {len(qa_list)} 个QA对")
            return qa_list

        except json.JSONDecodeError as e:
            print(f"分块 {chunk_index + 1} JSON解析失败: {str(e)}")
            return []
        except Exception as e:
            print(f"分块 {chunk_index + 1} QA生成失败: {str(e)}")
            return []

    def _generate_qa_node(self, state: QAParsingState) -> Dict[str, Any]:
        """QA generation node: process all chunks in parallel.

        Per-chunk failures are tolerated (they yield empty lists); results
        are re-ordered by chunk index since completion order is arbitrary.
        """
        print(f"开始为 {len(state.chunks)} 个分块生成QA对...")

        if state.error_message:
            return {}

        if not state.chunks:
            return {"error_message": "没有可处理的文本分块"}

        all_qa_pairs = []

        # Fan out chunk processing across a small thread pool; generation is
        # I/O-bound on the chat-model API.
        with ThreadPoolExecutor(max_workers=5, thread_name_prefix="qa_gen_") as executor:
            futures = {
                executor.submit(
                    self._generate_qa_for_chunk,
                    chunk,
                    state.qa_count_per_chunk,
                    i
                ): i for i, chunk in enumerate(state.chunks)
            }

            for future in concurrent.futures.as_completed(futures):
                chunk_index = futures[future]
                try:
                    qa_list = future.result()
                    # Tag each QA pair with its source chunk for ordering.
                    for qa in qa_list:
                        qa["chunk_index"] = chunk_index
                    all_qa_pairs.extend(qa_list)
                except Exception as e:
                    print(f"分块 {chunk_index + 1} 处理异常: {str(e)}")

        # Restore document order regardless of completion order.
        all_qa_pairs.sort(key=lambda x: x.get("chunk_index", 0))

        print(f"QA对生成完成,共生成 {len(all_qa_pairs)} 个问答对")
        return {"qa_pairs": all_qa_pairs}

    def _export_csv_node(self, state: QAParsingState) -> Dict[str, Any]:
        """Export the QA pairs to a temporary TAB-separated CSV file.

        The file is created with ``delete=False`` so it survives past this
        node; the upload node removes it after a successful upload. On a
        write failure the partial file is closed and removed here.
        """
        print(f"开始导出 {len(state.qa_pairs)} 个QA对到CSV文件...")

        if state.error_message:
            return {}

        if not state.qa_pairs:
            return {"error_message": "没有可导出的QA对"}

        try:
            # Use the PDF file name as the CSV file-name prefix.
            pdf_basename = os.path.splitext(os.path.basename(state.pdf_path))[0]

            temp_file = tempfile.NamedTemporaryFile(
                mode='w',
                suffix='.csv',
                prefix=f'{pdf_basename}_qa_',
                delete=False,
                encoding='utf-8',
                newline=''
            )
            try:
                # TAB delimiter: RAGFlow QA import expects question<TAB>answer.
                writer = csv.writer(temp_file, delimiter='\t')
                for qa in state.qa_pairs:
                    writer.writerow([qa['question'], qa['answer']])
            except Exception:
                # Don't leak the handle or leave a half-written file behind.
                temp_file.close()
                os.remove(temp_file.name)
                raise
            temp_file.close()
            csv_path = temp_file.name

            print(f"CSV文件导出完成: {csv_path}")
            print(f"共导出 {len(state.qa_pairs)} 个QA对")

            return {
                "csv_path": csv_path,
                "qa_count": len(state.qa_pairs)
            }

        except Exception as e:
            print(f"导出CSV失败: {str(e)}")
            return {"error_message": f"导出CSV失败: {str(e)}"}

    def _upload_document_node(self, state: QAParsingState) -> Dict[str, Any]:
        """Upload the CSV file to the RAGFlow dataset.

        Deletes the temporary CSV after a successful upload (best effort).
        """
        print(f"开始上传CSV文件到RAGFlow: {state.csv_path}")

        if state.error_message:
            return {}

        if not state.csv_path:
            return {"error_message": "没有可上传的CSV文件"}

        try:
            document_info_list = self.ragflow_service.upload_document(
                dataset_id=state.dataset_id,
                file_path=state.csv_path
            )

            if document_info_list and len(document_info_list) > 0:
                document_id = document_info_list[0]["id"]
                print(f"CSV文档上传成功,文档ID: {document_id}")

                # Clean up the temporary file; failure here is non-fatal.
                try:
                    os.remove(state.csv_path)
                    print(f"临时文件已清理: {state.csv_path}")
                except Exception as e:
                    print(f"清理临时文件失败: {str(e)}")

                return {"uploaded_document_id": document_id}
            else:
                return {"error_message": "文档上传失败: 未返回有效的文档信息"}

        except Exception as e:
            print(f"上传文档失败: {str(e)}")
            return {"error_message": f"上传文档失败: {str(e)}"}

    def _parse_document_node(self, state: QAParsingState) -> Dict[str, Any]:
        """Trigger RAGFlow parsing of the uploaded document.

        Follows the workflow's error contract: failures are reported via
        ``error_message`` (not raised), and the node is skipped when an
        earlier stage already failed. Returns an empty update on success —
        the previous ``{"parsed_results": []}`` update targeted a field
        that does not exist on QAParsingState and would be rejected by the
        Pydantic state schema.
        """
        if state.error_message:
            return {}

        if not state.uploaded_document_id:
            return {"error_message": "没有可解析的文档ID"}

        print(f"开始解析文档 {state.dataset_id}: {state.uploaded_document_id}")

        try:
            # parse_document returns a bool success flag.
            parse_success = self.ragflow_service.parse_document(
                dataset_id=state.dataset_id,
                document_ids=[state.uploaded_document_id]
            )

            if parse_success:
                print(f"文档解析成功,文档ID: {state.uploaded_document_id}")
                return {}
            else:
                print("文档解析失败: 未返回有效的解析结果")
                return {"error_message": "文档解析失败: 未返回有效的解析结果"}
        except Exception as e:
            print(f"解析文档时出错: {str(e)}")
            return {"error_message": f"解析文档时出错: {str(e)}"}

    def _complete_node(self, state: QAParsingState) -> Dict[str, Any]:
        """Terminal node: report the final outcome and mark completion."""
        if state.error_message:
            print(f"工作流完成(有错误): {state.error_message}")
        else:
            print("QA解析工作流完成!")
            print(f"  - 提取文本: {len(state.extracted_text)} 字符")
            print(f"  - 分块数量: {len(state.chunks)}")
            print(f"  - 生成QA对: {state.qa_count}")
            print(f"  - 上传文档ID: {state.uploaded_document_id}")

        return {"is_complete": True}

    def run(
        self,
        pdf_path: str,
        dataset_id: str,
        qa_count_per_chunk: int = 50,
        chunk_size: int = 1000,
        chunk_overlap: int = 200
    ) -> Dict[str, Any]:
        """
        Run the QA parsing workflow.

        Args:
            pdf_path: PDF file path.
            dataset_id: RAGFlow dataset ID.
            qa_count_per_chunk: QA pairs to generate per chunk (default 50).
            chunk_size: Text chunk size (default 1000).
            chunk_overlap: Chunk overlap size (default 200).

        Returns:
            Dict: the final workflow state. Check ``error_message`` to
            distinguish success from a failed run.
        """
        initial_state = QAParsingState(
            pdf_path=pdf_path,
            dataset_id=dataset_id,
            qa_count_per_chunk=qa_count_per_chunk,
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap
        )

        result = self.workflow.invoke(
            initial_state,
            config={"callbacks": [self.langfuse_handler]}
        )

        # LangGraph normally returns a dict-like state; normalize a Pydantic
        # model via model_dump() (.dict() is deprecated in pydantic v2).
        if isinstance(result, dict):
            return result
        else:
            return result.model_dump()
|