commit e51c776c19 (main)
@@ -0,0 +1,252 @@
from flask import Flask, request, jsonify
from llama_index.llms.ollama import Ollama
from llama_index.core.llms import MockLLM
import logging
import time

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

app = Flask(__name__)

# Task type mapping: task id -> [content type, word limit]
TASK_MAPPING = {
    "1": ["文章内容", 400],   # article body
    "2": ["文章标题", 20],    # article title
    "3": ["文章摘要", 200],   # article summary
    "4": ["文章关键词", 10],  # article keywords
    "5": ["文章描述", 50]     # article description
}

# Custom MockLLM used as a fallback when the Ollama server is unavailable
class CustomMockLLM(MockLLM):
    def complete(self, prompt, **kwargs):
        # Simulate generation latency and return a canned response.
        # Note: unlike the base class, this returns a plain string rather than a
        # CompletionResponse; ModelInterface below accounts for that.
        time.sleep(1)
        return f"这是MockLLM生成的测试响应。\n原始输入: {prompt}"

# Unified model interface that hides the difference between Ollama and the mock
class ModelInterface:
    def __init__(self, model):
        self.model = model

    def generate_text(self, prompt):
        if isinstance(self.model, Ollama):
            # Ollama returns a CompletionResponse; extract its text field
            response = self.model.complete(prompt)
            return response.text
        elif isinstance(self.model, CustomMockLLM):
            # The custom mock already returns a plain string
            return self.model.complete(prompt)
        else:
            raise ValueError("Unsupported model type")

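# Illustrative sketch (kept as a comment so it does not run on import): the
# interface can be exercised directly with the mock backend for local testing
# without an Ollama server. The prompt string below is a made-up example.
#
#     mock_interface = ModelInterface(CustomMockLLM())
#     print(mock_interface.generate_text("测试提示"))
#     # -> "这是MockLLM生成的测试响应。\n原始输入: 测试提示"
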
# Initialize the LLM: try Ollama first, fall back to the mock on failure
try:
    logging.info("尝试初始化 Ollama 模型...")
    raw_llm = Ollama(
        model="qwen2.5",
        temperature=0,
        request_timeout=120.0,
        base_url="http://localhost:11434"
    )
    logging.info("Ollama 模型初始化成功!")
except Exception as e:
    logging.warning(f"无法连接到 Ollama 服务器,使用 MockLLM。错误: {e}")
    raw_llm = CustomMockLLM()

# Create the unified model interface
llm = ModelInterface(raw_llm)

# Resolve a task id to its content type and word limit
def get_mapped_task(task_id):
    # Unknown ids fall back to a generic type with a 200-character limit
    return TASK_MAPPING.get(task_id, ["未知任务类型", 200])

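# Example lookups (values come from TASK_MAPPING above):
#
#     get_mapped_task("2")   # -> ["文章标题", 20]
#     get_mapped_task("99")  # -> ["未知任务类型", 200]  (fallback for unknown ids)
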
# Generate content with the model: x is the source task id, y the target task id
def generate_with_ollama(x, y, text):
    source_type, _ = get_mapped_task(x)
    target_type, word_limit = get_mapped_task(y)

    prompt = (
        f"你是一名专业的SEO内容策划专家。\n"
        f"任务:根据以下{source_type}生成符合要求的高质量SEO{target_type}。\n"
        f"- 内容要求:清晰、连贯,准确概括核心内容。包含专业但易懂的科学解释,自然地融入关键词,不要出现特殊符号和表情符号,地名要在[]之中\n"
        f"- 写作风格:专业但通俗易懂,适合一般读者阅读,保持科学准确性\n"
        f"- 输出限制:仅返回生成的{target_type},不需要任何解释或补充内容。\n"
        f"- 字数限制:{word_limit}字以内。\n"
        f"- 语言要求:输出内容符合中文语言习惯。\n"
        f"- 输出格式:请严格按照以下格式返回:\n"
        f"{target_type}\n\n"
        f"输入:\n{text}\n"
    )
    logging.info(f"生成 Prompt:{prompt}")

    try:
        response = llm.generate_text(prompt)
        logging.info(f"生成结果:{response}")
        return {"result": response}
    except Exception as e:
        logging.error(f"生成失败:{e}")
        return {"error": f"模型生成失败: {e}"}

# Optimize existing content of a single task type
def optimize_with_ollama(x, text):
    task_type, word_limit = get_mapped_task(x)

    prompt = (
        f"任务:对以下{task_type}进行优化。\n"
        f"要求:\n"
        f"1. 保持原文信息完整性,优化内容更清晰、逻辑更连贯。\n"
        f"2. 适当简化句式,使内容更简洁流畅。\n"
        f"3. 控制字数在{word_limit}字以内。\n"
        f"4. 请直接输出优化后的{task_type}内容,不需要额外解释。\n"
        f"\n输入:\n{text}\n"
    )
    logging.info(f"优化 Prompt:{prompt}")

    try:
        response = llm.generate_text(prompt)
        logging.info(f"优化结果:{response}")
        return {"result": response}
    except Exception as e:
        logging.error(f"优化失败:{e}")
        return {"error": f"模型优化失败: {e}"}

# Optimize content from a source type (x) into a target type (y)
def optimize_with_target(x, y, text):
    source_type, _ = get_mapped_task(x)
    target_type, word_limit = get_mapped_task(y)

    prompt = (
        f"任务:将以下{source_type}优化为{target_type}。\n"
        f"要求:\n"
        f"1. 确保内容符合{target_type}的表达特点,逻辑清晰,语言简洁。\n"
        f"2. 控制内容在{word_limit}字以内。\n"
        f"3. 请直接输出优化后的{target_type}内容,不要附加多余解释。\n"
        f"\n输入:\n{text}\n"
    )
    logging.info(f"优化(带目标类型)Prompt:{prompt}")

    try:
        response = llm.generate_text(prompt)
        logging.info(f"优化结果:{response}")
        return {"result": response}
    except Exception as e:
        logging.error(f"优化失败:{e}")
        return {"error": f"模型优化失败: {e}"}

# Extract keywords with the model: x is the source task id, y the number of keywords
def generate_keywords(x, y, text):
    # Only the content type is needed here; the word limit is unused
    task_type, _ = get_mapped_task(x)

    # Build the keyword-extraction prompt
    prompt = (
        f"任务:根据以下{task_type}提取关键词。\n"
        f"要求:\n"
        f"1. 提取{y}个关键词,每个关键词应准确概括内容核心思想。\n"
        f"2. 关键词之间用逗号分隔。\n"
        f"3. 请仅输出关键词列表,不要添加其他内容。\n"
        f"\n输入:\n{text}\n"
    )
    logging.info(f"生成关键词 Prompt:{prompt}")

    try:
        response = llm.generate_text(prompt)
        logging.info(f"生成关键词结果:{response}")
        return {"result": response}
    except Exception as e:
        logging.error(f"生成关键词失败:{e}")
        return {"error": f"模型生成关键词失败: {e}"}

# Generation endpoint
@app.route('/generate', methods=['POST'])
def generate():
    # silent=True returns None instead of raising when the body is not valid JSON
    params = request.get_json(silent=True)
    if not params:
        return jsonify({"error": "缺少参数"}), 400

    x = params.get("x")
    y = params.get("y")
    text = params.get("text")

    if not x or not y or not text:
        return jsonify({"error": "参数不完整"}), 400

    if x not in TASK_MAPPING:
        return jsonify({"error": f"无效的源任务类型: {x}"}), 400
    if y not in TASK_MAPPING:
        return jsonify({"error": f"无效的目标任务类型: {y}"}), 400

    result = generate_with_ollama(x, y, text)
    return jsonify(result)

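# Illustrative client call (kept as a comment; assumes the `requests` package, a
# service running on localhost:5000, and placeholder text):
#
#     import requests
#     resp = requests.post("http://localhost:5000/generate",
#                          json={"x": "1", "y": "2", "text": "这里是原始文章内容"})
#     print(resp.json())  # {"result": "..."} on success, {"error": "..."} on failure
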
# Optimization endpoint
@app.route('/optimize', methods=['POST'])
def optimize():
    params = request.get_json(silent=True)
    if not params:
        return jsonify({"error": "缺少参数"}), 400

    x = params.get("x")
    text = params.get("text")

    if not x or not text:
        return jsonify({"error": "参数不完整"}), 400

    if x not in TASK_MAPPING:
        return jsonify({"error": f"无效的任务类型: {x}"}), 400

    result = optimize_with_ollama(x, text)
    return jsonify(result)

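# Illustrative client call (comment only; placeholder text, same assumptions as above):
#
#     resp = requests.post("http://localhost:5000/optimize",
#                          json={"x": "3", "text": "这里是待优化的文章摘要"})
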
# Optimization-with-target endpoint
@app.route('/optimize_with_target', methods=['POST'])
def optimize_with_target_api():
    params = request.get_json(silent=True)
    if not params:
        return jsonify({"error": "缺少参数"}), 400

    x = params.get("x")
    y = params.get("y")
    text = params.get("text")

    if not x or not y or not text:
        return jsonify({"error": "参数不完整"}), 400

    if x not in TASK_MAPPING:
        return jsonify({"error": f"无效的源任务类型: {x}"}), 400
    if y not in TASK_MAPPING:
        return jsonify({"error": f"无效的目标任务类型: {y}"}), 400

    result = optimize_with_target(x, y, text)
    return jsonify(result)

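# Illustrative client call (comment only; placeholder text):
#
#     resp = requests.post("http://localhost:5000/optimize_with_target",
#                          json={"x": "1", "y": "3", "text": "这里是原始文章内容"})
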
# Keyword-generation endpoint
@app.route('/generate_keywords', methods=['POST'])
def generate_keywords_api():
    params = request.get_json(silent=True)
    if not params:
        return jsonify({"error": "缺少参数"}), 400

    x = params.get("x")
    y = params.get("y")
    text = params.get("text")

    if not x or not y or not text:
        return jsonify({"error": "参数不完整"}), 400

    # Validate the task type
    if x not in TASK_MAPPING:
        return jsonify({"error": f"无效的任务类型: {x}"}), 400

    # Here y is the number of keywords to extract and must be a positive integer
    try:
        y = int(y)
        if y <= 0:
            return jsonify({"error": "关键词数量必须为正整数"}), 400
    except (TypeError, ValueError):
        return jsonify({"error": "关键词数量必须为整数"}), 400

    result = generate_keywords(x, y, text)
    return jsonify(result)

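# Illustrative client call (comment only; placeholder text). Note that for this
# endpoint "y" is the number of keywords to extract, not a task id:
#
#     resp = requests.post("http://localhost:5000/generate_keywords",
#                          json={"x": "1", "y": "5", "text": "这里是原始文章内容"})
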
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)