Update venice/chat.py

2026-01-15 11:51:01 +00:00
parent 455fc3556f
commit 994f43f735

@@ -1,7 +1,7 @@
 """
 title: Venice.ai Chat
 author: Jeff Smith
-version: 1.4.0
+version: 1.3.1
 license: MIT
 required_open_webui_version: 0.6.0
 requirements: httpx, pydantic
@@ -18,11 +18,10 @@ description: |
     Use venice_info/list_models("text") to discover available models.
 changelog:
-  1.4.0:
+  1.3.1:
     - Added VeniceChat namespace class for helper functions to avoid method collisions
     - Moved _get_api_key, _truncate, _format_error to VeniceChat namespace
     - Prevents Open WebUI framework introspection method name collisions
   1.3.0:
     - Fixed UserValves access pattern for per-user API keys
     - Added __request__ parameter handling for zero-config API calls
     - Enhanced __init__ for framework-driven configuration injection
@@ -41,7 +40,7 @@ import time
 class VeniceChat:
     """
     Namespaced helpers for Venice chat operations.
 
     Using a separate class prevents Open WebUI framework introspection
     from colliding with tool methods that have generic names like _get_api_key.
     """
@@ -52,9 +51,11 @@ class VeniceChat:
         # Check __user__ parameter first (for direct method calls)
         if __user__ and "valves" in __user__:
             user_valves_dict = __user__.get("valves")
-            if isinstance(user_valves_dict, dict) and user_valves_dict.get("VENICE_API_KEY"):
+            if isinstance(user_valves_dict, dict) and user_valves_dict.get(
+                "VENICE_API_KEY"
+            ):
                 return user_valves_dict["VENICE_API_KEY"]
         # Fall back to UserValves instance
         return user_valves.VENICE_API_KEY or valves.VENICE_API_KEY
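
The hunk above shows only the body of the key-resolution helper. Assembled as a self-contained function (the signature is an assumption; the precedence order comes straight from the diff), it resolves the key as: per-call __user__["valves"] dict first, then the UserValves instance, then the tool-level Valves default.

from typing import Any, Optional


def get_api_key(valves: Any, user_valves: Any, __user__: Optional[dict] = None) -> str:
    # Sketch with an assumed signature; the body mirrors the hunk above.
    # 1. Per-call __user__ dict (covers direct method calls).
    if __user__ and "valves" in __user__:
        user_valves_dict = __user__.get("valves")
        if isinstance(user_valves_dict, dict) and user_valves_dict.get("VENICE_API_KEY"):
            return user_valves_dict["VENICE_API_KEY"]
    # 2. UserValves instance, then 3. tool-level Valves default.
    return user_valves.VENICE_API_KEY or valves.VENICE_API_KEY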
@@ -598,7 +599,9 @@ class Tools:
             return VeniceChat.truncate("\n".join(lines), self.valves.MAX_RESPONSE_SIZE)
         except httpx.HTTPStatusError as e:
-            error_msg = VeniceChat.format_error(e, f"conversation with {resolved_model}")
+            error_msg = VeniceChat.format_error(
+                e, f"conversation with {resolved_model}"
+            )
             if __event_emitter__:
                 await __event_emitter__({"type": "status", "data": {"done": True}})
             return f"Error: {error_msg}"
@@ -775,8 +778,13 @@ class Tools:
         payload = {
             "model": resolved_model,
-            "messages": [{"role": "user",}],
-            "temperature": 0.3, "content": query # Lower for factual responses
+            "messages": [
+                {
+                    "role": "user",
+                }
+            ],
+            "temperature": 0.3,
+            "content": query,  # Lower for factual responses
             "max_tokens": 2048,
             "stream": False,
             "venice_parameters": {