diff --git a/venice/chat.py b/venice/chat.py
index d55a4b3..6dfafdc 100644
--- a/venice/chat.py
+++ b/venice/chat.py
@@ -83,26 +83,16 @@ class Tools:
         default="",
         description="Your Venice.ai API key (overrides admin default)"
     )
-    def __init__(self, valves: Optional[Dict] = None, __user__: dict = None, __request__: dict = None):
+    def __init__(self):
         """Initialize with optional valve configuration from framework"""
         # Handle valves configuration from framework
-        if valves:
-            self.valves = self.Valves(**valves)
-        else:
-            self.valves = self.Valves()
+        self.valves = self.Valves()
 
         # Enable tool usage visibility for debugging
         self.citation = True
 
         # Handle user valves configuration
-        if __user__ and "valves" in __user__:
-            user_valves_dict = __user__.get("valves", {})
-            if isinstance(user_valves_dict, dict):
-                self.user_valves = self.UserValves(**user_valves_dict)
-            else:
-                self.user_valves = self.UserValves()
-        else:
-            self.user_valves = self.UserValves()
+        self.user_valves = self.UserValves()
 
         # Simple in-memory cache
         self._cache: dict = {}
@@ -117,7 +107,7 @@ class Tools:
             return user_valves["VENICE_API_KEY"]
         elif hasattr(user_valves, "VENICE_API_KEY"):
             return user_valves.VENICE_API_KEY
-        
+
         # Fall back to UserValves instance
         return self.user_valves.VENICE_API_KEY or self.valves.VENICE_API_KEY
 
@@ -137,10 +127,10 @@ class Tools:
             return False
         return (time.time() - self._cache_times[key]) < self.valves.MODEL_CACHE_TTL
 
-    def _format_error(self, e: Exception, context: str = "") -> str:
+    def _format_error(self, e, context: str = "") -> str:
         """Format HTTP error with detailed context for LLM understanding."""
         try:
-            if hasattr(e, 'response') and e.response is not None:
+            if hasattr(e, "response") and e.response is not None:
                 error_msg = e.response.text[:200]
                 try:
                     error_json = e.response.json()
@@ -180,7 +170,9 @@ class Tools:
             pass
         return {}
 
-    async def _get_available_models(self, model_type: str = "text", __user__: dict = None) -> list[dict]:
+    async def _get_available_models(
+        self, model_type: str = "text", __user__: dict = None
+    ) -> list[dict]:
         """Fetch available models (cached)."""
         cache_key = f"models_{model_type}"
         if self._is_cache_valid(cache_key):
@@ -635,7 +627,11 @@ class Tools:
 
         # Resolve and validate model (require reasoning capability)
         resolved_model, error = await self._resolve_model(
-            model, "text", require_reasoning=True, __model__=__model__, __user__=__user__
+            model,
+            "text",
+            require_reasoning=True,
+            __model__=__model__,
+            __user__=__user__,
        )
        if error:
            return f"Error: {error}"
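
Reviewer note: a minimal sketch of how the simplified constructor is exercised after this change, assuming the hosting tool framework now instantiates Tools with no arguments and delivers per-user valves through the __user__ parameter of individual tool methods (as _get_available_models already accepts) rather than through __init__; the framework behavior and the placeholder key below are illustrative assumptions, not part of this diff:

    from venice.chat import Tools

    # Assumed framework behavior: zero-argument construction; admin-level
    # defaults live on the Valves model, and per-user overrides arrive later
    # via __user__ on each tool call instead of at construction time.
    tools = Tools()
    tools.valves.VENICE_API_KEY = "sk-..."  # hypothetical admin default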