1. Basic Environment Setup
```python
import requests
import json
from typing import List, Dict, Optional
from dataclasses import dataclass


@dataclass
class PromptContext:
    task: str
    domain: str
    requirements: List[str]


class OllamaService:
    def __init__(self, base_url: str = "http://localhost:11434"):
        self.base_url = base_url
        # Locally available Ollama models and their identifiers
        self.models = {
            'mistral': 'mistral',
            'llama2': 'llama2',
            'neural-chat': 'neural-chat'
        }
```
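Before wiring the services together, it can help to confirm that the local Ollama daemon is actually reachable. The sketch below is a minimal health check and not part of the original design: the helper name `check_connection` and the 5-second timeout are illustrative. It relies on Ollama's `GET /api/tags` endpoint, which lists the locally pulled models.

```python
def check_connection(service: OllamaService) -> bool:
    """Return True if the local Ollama service answers on /api/tags."""
    try:
        # GET /api/tags lists the models that have been pulled locally
        response = requests.get(f"{service.base_url}/api/tags", timeout=5)
        return response.status_code == 200
    except requests.exceptions.ConnectionError:
        return False
```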
2. Core Functionality
2.1 Prompt Generation Service
```python
class PromptGenerationService:
    def __init__(self, model_name: str = 'mistral'):
        self.model_name = model_name
        self.api_url = "http://localhost:11434/api/generate"

    async def generate_prompt(self, context: PromptContext) -> str:
        prompt = f"""
        Task: Create a detailed prompt for the following context:
        - Task type: {context.task}
        - Domain: {context.domain}
        - Requirements: {', '.join(context.requirements)}

        Generate a structured prompt that includes:
        1. Context setting
        2. Specific requirements
        3. Output format
        4. Constraints
        5. Examples (if applicable)
        """
        # Note: requests is a blocking client, so this call blocks the event loop
        response = requests.post(
            self.api_url,
            json={
                "model": self.model_name,
                "prompt": prompt,
                "stream": False
            }
        )
        return response.json()["response"]

    async def optimize_prompt(self, original_prompt: str) -> str:
        prompt = f"""
        Analyze and optimize the following prompt:

        "{original_prompt}"

        Provide:
        1. Improved version
        2. Explanation of changes
        3. Potential variations
        """
        response = requests.post(
            self.api_url,
            json={
                "model": self.model_name,
                "prompt": prompt,
                "stream": False
            }
        )
        return response.json()["response"]
```
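With `"stream": False`, the `/api/generate` endpoint returns a single JSON object whose `response` field holds the generated text. A slightly more defensive version of the call, sketched below, adds a timeout and raises on HTTP errors; the helper name `call_ollama` and the timeout value are illustrative, not part of the service above.

```python
def call_ollama(model: str, prompt: str,
                api_url: str = "http://localhost:11434/api/generate") -> str:
    """Illustrative standalone helper for a single non-streaming generation."""
    response = requests.post(
        api_url,
        json={"model": model, "prompt": prompt, "stream": False},
        timeout=120,  # generation can take a while on CPU-only machines
    )
    response.raise_for_status()  # surface HTTP errors instead of failing silently
    return response.json().get("response", "")
```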
2.2 Prompt Template Management
```python
class PromptTemplates:
    @staticmethod
    def get_code_review_template(code: str) -> str:
        return f"""
        Analyze the following code:

        {code}

        Provide:
        1. Code quality assessment
        2. Potential improvements
        3. Security concerns
        4. Performance optimization
        """

    @staticmethod
    def get_documentation_template(component: str) -> str:
        return f"""
        Generate documentation for: {component}

        Include:
        1. Overview
        2. API reference
        3. Usage examples
        4. Best practices
        """

    @staticmethod
    def get_refactoring_template(code: str) -> str:
        return f"""
        Suggest refactoring for:

        {code}

        Consider:
        1. Design patterns
        2. Clean code principles
        3. Performance impact
        4. Maintainability
        """
```
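A filled-in template is itself a complete prompt, so it can be posted to the model directly. The snippet below is a usage sketch only; the sample code string and the hard-coded `mistral` model are placeholders.

```python
def review_snippet(sample_code: str) -> str:
    """Send a code-review template to the local model and return its answer."""
    review_prompt = PromptTemplates.get_code_review_template(sample_code)
    response = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": "mistral", "prompt": review_prompt, "stream": False},
    )
    return response.json()["response"]


# Example call with a made-up snippet:
# print(review_snippet("def add(a, b):\n    return a + b"))
```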
3. Usage Examples
```python
async def main():
    # Initialize the service
    prompt_service = PromptGenerationService(model_name='mistral')

    # Example: code-generation prompt
    code_context = PromptContext(
        task='code_generation',
        domain='web_development',
        requirements=[
            'react component',
            'typescript',
            'material ui',
            'form handling'
        ]
    )
    code_prompt = await prompt_service.generate_prompt(code_context)
    print("Code generation prompt:", code_prompt)

    # Example: documentation prompt
    doc_context = PromptContext(
        task='documentation',
        domain='api_reference',
        requirements=[
            'openapi format',
            'examples included',
            'error handling',
            'authentication details'
        ]
    )
    doc_prompt = await prompt_service.generate_prompt(doc_context)
    print("Documentation prompt:", doc_prompt)

    # Example: prompt optimization
    original_prompt = "Write a React component"
    optimized_prompt = await prompt_service.optimize_prompt(original_prompt)
    print("Optimized prompt:", optimized_prompt)


if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
```
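Note that this example assumes the Ollama daemon is already running locally (for instance via `ollama serve`) and that the `mistral` model has been pulled beforehand; otherwise every request to `http://localhost:11434` fails with a connection error.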
4. Utility Class
```python
class PromptUtils:
    @staticmethod
    def format_requirements(requirements: List[str]) -> str:
        return "\n".join([f"- {req}" for req in requirements])

    @staticmethod
    def validate_prompt(prompt: str) -> bool:
        # Simple validation: reject empty or whitespace-only prompts
        return len(prompt.strip()) > 0

    @staticmethod
    def enhance_prompt(prompt: str) -> str:
        # Append generic quality requirements to any prompt
        return f"""
        {prompt}

        Additional requirements:
        - Provide clear and detailed explanations
        - Include practical examples
        - Consider edge cases
        - Follow best practices
        """
```
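A quick usage sketch of the utilities; the input string is made up.

```python
raw_prompt = "Write a REST endpoint"  # made-up example input
if PromptUtils.validate_prompt(raw_prompt):
    enhanced = PromptUtils.enhance_prompt(raw_prompt)
    print(enhanced)
```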
5. Error Handling
```python
class PromptGenerationError(Exception):
    pass


class ModelConnectionError(Exception):
    pass


def handle_api_errors(func):
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except requests.exceptions.ConnectionError:
            raise ModelConnectionError("Unable to connect to the Ollama service")
        except Exception as e:
            raise PromptGenerationError(f"Prompt generation error: {str(e)}")
    return wrapper
```
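The decorator is intended for the async service methods. One way to attach it, sketched below as a hypothetical subclass so the original class stays untouched, is:

```python
class SafePromptGenerationService(PromptGenerationService):
    # Hypothetical subclass showing how the decorator wraps a service method.
    @handle_api_errors
    async def generate_prompt(self, context: PromptContext) -> str:
        return await super().generate_prompt(context)
```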
6. Configuration Management
```python
class Config:
    models = {
        'mistral': {
            'name': 'mistral',
            'description': 'Fast, lightweight prompt generation',
            'parameters': {
                'temperature': 0.7,
                'max_tokens': 2000
            }
        },
        'llama2': {
            'name': 'llama2',
            'description': 'Complex, detailed prompt requirements',
            'parameters': {
                'temperature': 0.8,
                'max_tokens': 4000
            }
        },
        'neural-chat': {
            'name': 'neural-chat',
            'description': 'Interactive prompt optimization',
            'parameters': {
                'temperature': 0.9,
                'max_tokens': 3000
            }
        }
    }
```
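The service classes above do not read these parameters automatically; to apply them, they can be forwarded through the `options` field of the `/api/generate` payload. Note that Ollama's generation options use `num_predict` rather than `max_tokens` for the output-length limit, so the sketch below maps the config key accordingly; the helper function itself is an assumption, not part of the original code.

```python
def build_request_payload(model_key: str, prompt: str) -> dict:
    """Translate a Config entry into an Ollama /api/generate payload."""
    entry = Config.models[model_key]
    params = entry['parameters']
    return {
        "model": entry['name'],
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": params['temperature'],
            # Ollama expects num_predict; map the config's max_tokens onto it
            "num_predict": params['max_tokens'],
        },
    }
```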
With this Python implementation, you can:
- Generate structured prompts
- Optimize existing prompts
- Use predefined templates
- Handle prompt requirements across a variety of scenarios
Key advantages:
- Object-oriented design
- Async support
- Error handling
- Type hints
- Configuration management
- Modular structure
This implementation can serve as a base framework and be extended and customized to fit your specific needs.