init

.gitattributes (vendored, new file, +35)
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
							
								
								
									
README.md (new file, +6)
@@ -0,0 +1,6 @@
---
license: apache-2.0
sdk: gradio
app_file: app.py

---
							
								
								
									
app.py (new file, +144)
@@ -0,0 +1,144 @@
import os
import json
import gradio as gr
import requests
from dotenv import load_dotenv

# Load environment variables
load_dotenv()
API_KEY = os.getenv("API_KEY")
API_URL = "https://api.deepseek.com/v1/chat/completions"

def build_messages(query: str, history: list):
    """Build the OpenAI-style message list from the conversation history."""
    messages = [{"role": "system", "content": "你是一个有帮助的助手"}]
    for user_msg, bot_msg in history:
        messages.extend([
            {"role": "user", "content": user_msg},
            {"role": "assistant", "content": bot_msg}
        ])
    messages.append({"role": "user", "content": query})
    return messages

def stream_response(query: str, history: list):
    """Streaming response generator."""
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }

    try:
        response = requests.post(
            API_URL,
            headers=headers,
            json={
                "model": "deepseek-chat",
                "messages": build_messages(query, history),
                "temperature": 0.7,
                "stream": True
            },
            stream=True,
            timeout=60
        )

        # Check the response status before processing the body
        if response.status_code != 200:
            error_msg = f"API 返回错误状态码: {response.status_code} - {response.text}"
            print(error_msg)
            yield error_msg
            return

        partial_message = ""
        for chunk in response.iter_lines():
            # Skip blank lines and keep-alive / [DONE] markers
            if not chunk or b'[DONE]' in chunk:
                continue

            try:
                # Debug: print the raw chunk
                print("原始 chunk:", chunk)

                decoded = chunk.decode('utf-8').strip()
                # Strip the "data:" prefix if present
                if decoded.startswith('data:'):
                    json_str = decoded[5:].strip()
                else:
                    json_str = decoded

                # Skip anything that does not look like a JSON object
                if not json_str.startswith('{'):
                    continue

                data = json.loads(json_str)

                if content := data['choices'][0]['delta'].get('content'):
                    partial_message += content
                    yield partial_message

            except json.JSONDecodeError as e:
                print(f"JSON 解析失败: {e} | 原始数据: {decoded}")
                continue
            except Exception as e:
                print(f"处理 chunk 时发生错误: {str(e)}")
                continue

    except Exception as e:
        yield f"⚠️ 请求失败:{str(e)}"

def stream_response_back(query: str, history: list):
    """Streaming response generator (earlier variant; not wired to the UI below)."""
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }

    print(query)
    print(history)

    try:
        response = requests.post(
            API_URL,
            headers=headers,
            json={
                "model": "deepseek-chat",
                "messages": build_messages(query, history),
                "temperature": 0.7,
                "stream": True
            },
            stream=True,
            timeout=60
        )
        response.raise_for_status()

        print(response)

        partial_message = ""
        for chunk in response.iter_lines():
            if chunk:
                # Parse the streaming data format
                decoded = chunk.decode('utf-8').strip()
                if decoded.startswith('data:'):
                    data = json.loads(decoded[5:])
                    if content := data['choices'][0]['delta'].get('content'):
                        partial_message += content
                        yield partial_message

    except Exception as e:
        yield f"⚠️ 请求失败:{str(e)}"

# Build the chat interface with a typewriter (streaming) effect
demo = gr.ChatInterface(
    fn=stream_response,
    title="DeepSeek 智能助手",
    description="输入消息开始对话(支持流式打字效果)",
    theme="soft",
    examples=["你好!", "如何学习AI?", "写一首关于春天的诗"],
    cache_examples=False,
    # retry_btn=None,
    # undo_btn=None,
    # clear_btn="清空历史",
    stop_btn="停止生成",
)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
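
For readers skimming the diff: each streamed line from the API arrives as an SSE-style "data: {...}" payload whose choices[0].delta.content carries the next token, which is what stream_response accumulates. The snippet below is a minimal, self-contained sketch of that per-chunk parsing step; the sample bytes are invented for illustration, not real API output. Note also that running app.py locally appears to need gradio, requests, and python-dotenv installed (requirements.txt below lists only uvicorn), plus an API_KEY entry in a .env file.

import json

# Minimal sketch of the per-chunk parsing done in stream_response().
# The sample bytes below are made up to mirror the streaming format.
sample = b'data: {"choices": [{"delta": {"content": "Hello"}}]}'

decoded = sample.decode("utf-8").strip()
json_str = decoded[5:].strip() if decoded.startswith("data:") else decoded
data = json.loads(json_str)
if content := data["choices"][0]["delta"].get("content"):
    print("token:", content)  # -> token: Hello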
							
								
								
									
requirements.txt (new file, +1)
@@ -0,0 +1 @@
uvicorn>=0.14.0