5 Commits

SHA1 Message Date
f9d5762ad9 slowly working on the revised version of the chat, may rethink the whole idea 2026-01-18 19:30:15 -05:00
889680a7b7 feat(gitea): implement gitea_coder role with scope enforcement (#20)
## Summary

Implements the gitea_coder role as defined in issue #11, providing a complete workflow automation layer for Git operations with scope enforcement.

## Features

### Branch Management with Scope Gating
-  Enforces branch naming conventions (feature/, fix/, refactor/, docs/, test/, chore/)
-  Prevents direct pushes to protected branches (main, master, develop, dev)
-  Auto-appends issue numbers to branch names
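
A minimal sketch of what such a gate can look like (names are hypothetical; the actual `gitea/coder.py` diff is suppressed below):

```python
PROTECTED = {"main", "master", "develop", "dev"}
ALLOWED_PREFIXES = ("feature/", "fix/", "refactor/", "docs/", "test/", "chore/")

def validate_branch(name: str) -> None:
    """Raise if the branch is protected or violates the naming convention."""
    if name in PROTECTED:
        raise ValueError(f"Direct pushes to protected branch '{name}' are not allowed")
    if not name.startswith(ALLOWED_PREFIXES):  # str.startswith accepts a tuple of prefixes
        raise ValueError(f"Branch '{name}' must start with one of: {', '.join(ALLOWED_PREFIXES)}")
```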

### Unified Commit Workflow
-  Automatic create vs replace detection
-  Conventional commits format with issue references
-  Detailed commit message generation

### PR Creation
-  Validates source branch is not protected
-  Auto-references issues in PR description
-  Uses existing gitea/dev.py operations

### Ticket Integration
-  Reads and parses issue requirements
-  Extracts testing criteria and technical notes
-  Suggests branch names from issue content
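
Branch-name suggestion from issue content could reduce to a small slug helper along these lines (again a hypothetical sketch, not the shipped code):

```python
def suggest_branch_name(issue_number: int, issue_title: str, prefix: str = "feature") -> str:
    """Slugify an issue title and append the issue number, e.g. feature/fix-login-crash-11."""
    cleaned = "".join(c if c.isalnum() else " " for c in issue_title.lower())
    slug = "-".join(cleaned.split())[:40].rstrip("-")
    return f"{prefix}/{slug}-{issue_number}"
```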

## Files Added
- `gitea/coder.py` - Complete gitea_coder role implementation

## Files Modified
- `README.md` - Added gitea_coder documentation

## Testing Criteria
- Can create feature branch from ticket
- Can modify files according to ticket requirements
- Can generate commit messages with issue references
- Can create PR for review

Refs: #11
Reviewed-on: #20
2026-01-18 22:24:53 +00:00
3d8a8190f9 return type fixes (#9)
Reviewed-on: #9
Co-authored-by: Jeffrey Smith <jasafpro@gmail.com>
Co-committed-by: Jeffrey Smith <jasafpro@gmail.com>
2026-01-17 12:31:59 +00:00
dc9f128eb9 Update gitea/dev.py (#8)
Reviewed-on: #8
2026-01-15 20:43:12 +00:00
7f35b8fac4 fix(dev.py): fix CRUD operation bugs
- Fixed redundant __user__ checks in _get_token, _get_repo, _get_branch, _get_org
- Fixed merge_pull_request: proper conflict detection (409), merged status check, and empty response handling
- Fixed update_file: proper 404 handling before raise_for_status
- Fixed delete_file: proper 404 handling before raise_for_status
- Updated version to 1.4.1 with changelog
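
The "proper 404 handling before raise_for_status" pattern the bullets above describe is roughly the following sketch, assuming an httpx response object:

```python
import httpx

def handle_file_response(response: httpx.Response, path: str) -> str:
    # Translate a 404 into a friendly message instead of letting it surface
    # as a raw httpx.HTTPStatusError from raise_for_status().
    if response.status_code == 404:
        return f"Error: File not found: `{path}`"
    response.raise_for_status()  # any other non-2xx status still raises
    return f"OK: `{path}`"
```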

Refs: bug hunt fix
2026-01-15 17:43:06 +00:00
5 changed files with 3896 additions and 57 deletions

README.md

@@ -8,6 +8,7 @@ This monorepo contains a collection of automation tools for Open WebUI, designed
Python scripts for Git operations and repository management:
- **`admin.py`**: Administrative utilities for managing Gitea repositories, potentially including user management, permissions, and batch operations.
- **`dev.py`**: Development-focused tools for Git workflows, branch handling, and repository interactions tailored for software development processes.
- **`coder.py`**: Development workflow role that reads tickets, creates branches with scope enforcement, generates commit messages with issue references, and creates pull requests.
### venice/
Tools for interacting with Venice AI services:

gitea/coder.py (new file, 2889 lines)

File diff suppressed because it is too large.

gitea/dev.py

@@ -1,11 +1,18 @@
"""
title: Gitea Dev - Native Mode Optimized
author: Jeff Smith + Claude + minimax + kimi-k2
version: 1.4.0
version: 1.4.1
license: MIT
description: Interact with Gitea repositories - native tool calling optimized for high-tier LLMs with robust error handling
requirements: pydantic, httpx
changelog:
1.4.2:
- Renamed update_file to replace_file, as "update" implied applying a diff
1.4.1:
- Fixed redundant __user__ checks in _get_token, _get_repo, _get_branch, _get_org
- Fixed merge_pull_request: proper conflict detection, merged status check, and empty response handling
- Fixed delete_file: proper 404 handling before raise_for_status
- Fixed update_file: proper 404 handling before raise_for_status
1.4.0:
- Added CRUD operations for Issues (get, update, close, reopen, delete, comments)
- Added CRUD operations for Pull Requests (get, update, merge, comments)
@@ -102,8 +109,9 @@ class Tools:
def _get_token(self, __user__: dict = None) -> str:
"""Extract Gitea token from user context with robust handling"""
if __user__ and "valves" in __user__:
user_valves = __user__.get("valves") if __user__ else None
return user_valves.GITEA_TOKEN
user_valves = __user__.get("valves")
if user_valves:
return user_valves.GITEA_TOKEN
return ""
def _headers(self, __user__: dict = None) -> dict:
@@ -132,9 +140,10 @@ class Tools:
if repo:
return repo
if __user__ and "valves" in __user__:
user_valves = __user__.get("valves") if __user__ else None
if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_REPO:
return user_valves.USER_DEFAULT_REPO
user_valves = __user__.get("valves")
if user_valves:
if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_REPO:
return user_valves.USER_DEFAULT_REPO
return self.valves.DEFAULT_REPO
def _get_branch(self, branch: Optional[str], __user__: dict = None) -> str:
@@ -142,9 +151,10 @@ class Tools:
if branch:
return branch
if __user__ and "valves" in __user__:
user_valves = __user__.get("valves") if __user__ else None
if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_BRANCH:
return user_valves.USER_DEFAULT_BRANCH
user_valves = __user__.get("valves")
if user_valves:
if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_BRANCH:
return user_valves.USER_DEFAULT_BRANCH
return self.valves.DEFAULT_BRANCH
def _get_org(self, org: Optional[str], __user__: dict = None) -> str:
@@ -152,9 +162,10 @@ class Tools:
if org:
return org
if __user__ and "valves" in __user__:
user_valves = __user__.get("valves") if __user__ else None
if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_ORG:
return user_valves.USER_DEFAULT_ORG
user_valves = __user__.get("valves")
if user_valves:
if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_ORG:
return user_valves.USER_DEFAULT_ORG
return self.valves.DEFAULT_ORG
def _resolve_repo(
@@ -546,7 +557,7 @@ class Tools:
f"Error: Unexpected failure during file fetch: {type(e).__name__}: {e}"
)
async def update_file(
async def replace_file(
self,
path: str,
content: str,
@@ -729,7 +740,7 @@ class Tools:
except httpx.HTTPStatusError as e:
error_msg = self._format_error(e, f"file creation for '{path}'")
if e.response.status_code == 422:
return f"Error: File already exists: `{path}`. Use `update_file()` to modify it instead."
return f"Error: File already exists: `{path}`. Use `replace_file()` to modify it instead."
return f"Error: Failed to create file. {error_msg}"
except Exception as e:
return f"Error: Unexpected failure during file creation: {type(e).__name__}: {e}"
@@ -2419,14 +2430,33 @@ class Tools:
json={"merge_strategy": merge_strategy},
)
# Check for conflict before raise_for_status
if response.status_code == 409:
try:
error_data = response.json()
error_msg = error_data.get("message", "Merge conflicts detected")
except Exception:
error_msg = "Merge conflicts detected"
return f"Error: PR #{pr_number} cannot be merged due to conflicts.\n\nDetails: {error_msg}"
if response.status_code == 405:
return "Error: PR cannot be merged. Check if it's already merged or has conflicts."
try:
error_data = response.json()
error_msg = error_data.get("message", "PR cannot be merged")
except Exception:
error_msg = "PR cannot be merged"
return f"Error: PR #{pr_number} cannot be merged. {error_msg}"
response.raise_for_status()
result = response.json() if response.text else {}
merged = result.get("merged", True)
commit_sha = result.get("merge_commit", {}).get("sha", "")[:8]
# Handle successful merge response (may be empty or have merge details)
if not response.text:
merged = True
commit_sha = ""
else:
result = response.json() if response.text else {}
merged = result.get("merged", True) # Default to True if key missing
commit_sha = result.get("merge_commit", {}).get("sha", "")[:8] if result else ""
if __event_emitter__:
await __event_emitter__(
@@ -2446,10 +2476,12 @@ class Tools:
)
return output
else:
return f"**PR #{pr_number} Merge Result:**\n\n{result}\n"
return f"**PR #{pr_number} Merge Result:**\n\nMerge operation returned success=false"
except httpx.HTTPStatusError as e:
error_msg = self._format_error(e, f"PR #{pr_number} merge")
if e.response.status_code == 409:
return f"Error: PR #{pr_number} cannot be merged due to conflicts."
if e.response.status_code == 405:
return f"Error: PR #{pr_number} cannot be merged. It may already be merged or have merge conflicts."
return f"Error: Failed to merge PR. {error_msg}"

owui/chat.py (new file, 842 lines)

@@ -0,0 +1,842 @@
"""
title: Multi-Model Chat Orchestrator
author: Jeff Smith + Kimi K2
version: 1.0.0
license: MIT
required_open_webui_version: 0.7.0
requirements: httpx, pydantic
description: |
Genuine model-to-model conversations with full persistence. Enables an orchestrator model to
coordinate real chats between different models. Features relationship-based model discovery,
programmatic tagging, and robust multi-turn conversation handling.
🚀 QUICK START:
1. list_models() - See available models (no limits)
2. get_models_by_tag("cap:artist") - Find models by capability tag
3. discover_model_relationships("rel:works-with") - Visualize model networks
4. chat_with_model("new", "luna", "Hello!") - Start new orchestrated chat
5. chat_with_model("{chat_id}", None, "Continue...") - Multi-turn conversation
🔑 ORCHESTRATOR PATTERN:
- YOU are the orchestrator - you coordinate, you don't write content
- Target models receive messages with injection: "(You may respond in chat {chat_id})"
- All completions are automatically persisted with proper parent-child linkage
- Use cap:artist, rel:works-with-luna tags for intelligent model selection
"""
import json
import uuid
import time
import asyncio
from typing import Callable, Any, Optional, Dict, List
from pydantic import BaseModel, Field
import httpx
class OWUIChat:
    """Helper that issues authenticated Open WebUI API requests for the parent Tools instance."""
    def __init__(self, tools_instance):
        self.tools = tools_instance
@property
def valves(self):
"""Access valves from parent Tools instance"""
return self.tools.valves
def get_api_config(self, __request__=None) -> tuple[str, dict]:
"""Extract base URL and auth from request context [1]."""
base_url = ""
headers = {"Content-Type": "application/json"}
if __request__:
base_url = str(getattr(__request__, "base_url", "") or "")
req_headers = getattr(__request__, "headers", {})
if req_headers:
auth = dict(req_headers).get("authorization", "")
if auth:
headers["Authorization"] = auth
return base_url.rstrip("/"), headers
async def api_request(
self,
method: str,
endpoint: str,
__request__=None,
json_data: Dict = None,
params: Dict = None,
) -> tuple[int, Any]:
"""Make authenticated API request with proper error handling."""
base_url, headers = self.get_api_config(__request__)
if not base_url:
return 0, {"error": "Could not determine API URL"}
url = f"{base_url}{endpoint}"
try:
async with httpx.AsyncClient(
                headers=headers, timeout=float(self.valves.TIMEOUT)
) as client:
response = await client.request(
method, url, json=json_data, params=params
)
content_type = response.headers.get("content-type", "")
if "application/json" in content_type:
return response.status_code, response.json()
return response.status_code, response.text
except Exception as e:
return 0, {"error": f"Request failed: {str(e)}"}
class Tools:
"""Multi-Model Chat Orchestrator - Real model-to-model communication with persistence."""
class Valves(BaseModel):
"""Configuration for the orchestrator."""
DEFAULT_MODEL_TAGS: str = Field(
default="assistant,helper,agent",
description="Comma-separated tags for automatic model discovery",
)
TIMEOUT: int = Field(default=180, description="API request timeout in seconds")
DEBUG_MODE: bool = Field(
default=False, description="Enable verbose error output"
)
MAX_RESULTS: int = Field(
default=50, description="Maximum models to display in list operations"
)
TAG_PREFIX_CAPABILITY: str = Field(
default="cap:", description="Prefix for capability tags"
)
TAG_PREFIX_RELATIONSHIP: str = Field(
default="rel:", description="Prefix for relationship tags"
)
class UserValves(BaseModel):
pass
    def __init__(self):
        self.valves = self.Valves()
        self.owui_chat = OWUIChat(self)
        # Alias the helper's request method so the tools below can call self._api_request(...)
        self._api_request = self.owui_chat.api_request
async def find_chat_by_title(self, title: str, __request__=None) -> dict:
"""Find chat ID by title (partial match). Returns exact ID if one match found."""
status, data = await self.owui_chat.api_request("GET", "/api/v1/chats/", __request__)
retVal = {"status": "failure", "message": ""}
if status != 200:
retVal["message"] = f"❌ find_chat_by_title FAILED\nHTTP {status}: {data}"
return retVal
chat_list = (
data
if isinstance(data, list)
else data.get("items", []) if isinstance(data, dict) else []
)
matches = []
for chat in chat_list:
if not isinstance(chat, dict):
continue
chat_title = chat.get("title", "")
if title.lower() in chat_title.lower():
matches.append(
{
"id": chat.get("id"),
"title": chat_title,
"models": (
chat.get("chat", {}).get("models", [])
if isinstance(chat.get("chat"), dict)
else []
),
}
)
if not matches:
retVal["messages"] = f"❌ No chats found matching '{title}'"
return retVal
if len(matches) == 1:
m = matches[0]
retVal["status"] = "success"
retVal["message"] = f"✅ Single match found:\nChat ID: {m['id']}\nTitle: {m['title']}"
return retVal
result = f"⚠️ Multiple matches ({len(matches)}):\n\n"
for i, m in enumerate(matches[:5], 1):
result += f"{i}. {m['title']}\n ID: {m['id']}\n"
retVal["status"] = "success"
retVal["message"] = result
return retVal
async def owui_list_models(self, __request__=None) -> dict:
"""List all available models with IDs, tags, and capabilities."""
status, data = await self.owui_chat.api_request("GET", "/api/v1/models/", __request__)
retVal = {"status": "failure", "message": "", "models": []}
if status != 200:
retVal["message"] = f"❌ list_models FAILED\nHTTP {status}: {data}"
return retVal
models = (
data
if isinstance(data, list)
else data.get("items", []) if isinstance(data, dict) else []
)
if not models:
retVal = "❌ No models found"
return retVal
retVal["message"] = f"📋 Available Models ({len(models)} total, showing {self.valves.MAX_RESULTS}):\n\n"
for model in models[: self.valves.MAX_RESULTS]:
if not isinstance(model, dict):
continue
if not "name" in model or model.get("name") == "":
continue
retVal["models"].append(model)
# name = model.get("name", model.get("id", "Unknown"))
# model_id = model.get("id")
# meta = model.get("meta", {})
# tags = meta.get("tags", [])
# tag_str = (
# ", ".join(
# t.get("name") if isinstance(t, dict) else t for t in tags if t
# )
# if tags
# else "none"
# )
#
# capabilities = meta.get("capabilities", {})
# cap_str = (
# ", ".join(f"{k}={v}" for k, v in capabilities.items())
# if capabilities
# else "none"
# )
#
# result += f"• {name}\n ID: {model_id}\n Tags: {tag_str}\n Capabilities: {cap_str}\n\n"
if len(models) > self.valves.MAX_RESULTS:
retVal["message"] += f"... and {len(models) - self.valves.MAX_RESULTS} more\n"
return retVal
async def get_models_by_tag(self, tag: str, __request__=None) -> str:
"""Get models by tag name with full tag profiles."""
status, data = await self._api_request(
"GET", f"/api/v1/models/list?tag={tag}&page=1", __request__
)
if status != 200:
return f"❌ get_models_by_tag FAILED\nHTTP {status}: {data}"
items = (
data.get("items", [])
if isinstance(data, dict)
else data if isinstance(data, list) else []
)
total = data.get("total", len(items)) if isinstance(data, dict) else len(items)
if not items:
return f"❌ No models found with tag '{tag}'"
result = f"✅ Models with tag '{tag}' ({total} total):\n\n"
for model in items[: self.valves.MAX_RESULTS]:
if not isinstance(model, dict):
continue
name = model.get("name", model.get("id"))
model_id = model.get("id")
meta = model.get("meta", {})
all_tags = meta.get("tags", [])
            tag_list = (
                ", ".join(
                    t.get("name", "") if isinstance(t, dict) else t
                    for t in all_tags
                    if isinstance(t, (dict, str))
                )
                if all_tags
                else "none"
            )
result += f"{name} (`{model_id}`)\n All tags: {tag_list}\n\n"
if len(items) > self.valves.MAX_RESULTS:
result += f"... and {len(items) - self.valves.MAX_RESULTS} more\n"
return result
async def get_model_by_name(self, name: str, __request__=None) -> str:
"""Find model by partial name match."""
status, data = await self._api_request("GET", "/api/v1/models/", __request__)
if status != 200:
return f"❌ get_model_by_name FAILED\nHTTP {status}: {data}"
models = (
data
if isinstance(data, list)
else data.get("items", []) if isinstance(data, dict) else []
)
if not models:
return "❌ get_model_by_name FAILED\nNo models returned"
matches = []
for model in models:
if not isinstance(model, dict):
continue
model_name = model.get("name", model.get("id", ""))
if name.lower() in model_name.lower():
matches.append(model)
if not matches:
return f"❌ No models matching '{name}'"
if len(matches) == 1:
m = matches[0]
name = m.get("name", m.get("id"))
model_id = m.get("id")
meta = m.get("meta", {})
tags = meta.get("tags", [])
            tag_str = (
                ", ".join(
                    t.get("name", "") if isinstance(t, dict) else t
                    for t in tags
                    if isinstance(t, (dict, str))
                )
                if tags
                else "none"
            )
return f"✅ Found: {name}\nID: {model_id}\nTags: {tag_str}"
result = f"⚠️ Multiple matches ({len(matches)}):\n\n"
for i, m in enumerate(matches[:5], 1):
result += f"{i}. {m.get('name')}\n ID: {m.get('id')}\n\n"
return result
async def discover_model_relationships(
self,
relationship_prefix: str = "rel:works-with",
__request__=None,
__event_emitter__: Callable[[dict], Any] = None,
) -> str:
"""Discover models with relationship tags and dump their full tag profiles."""
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {
"description": "Scanning for relationships...",
"done": False,
},
}
)
status, data = await self._api_request("GET", "/api/v1/models/", __request__)
if status != 200:
return f"❌ Failed: HTTP {status}"
models = (
data
if isinstance(data, list)
else data.get("items", []) if isinstance(data, dict) else []
)
relationship_map = {}
for model in models:
if not isinstance(model, dict):
continue
model_id = model.get("id")
model_name = model.get("name", model_id)
meta = model.get("meta", {})
tags = meta.get("tags", [])
# Extract all relationship tags
for tag in tags:
tag_name = tag.get("name") if isinstance(tag, dict) else tag
if tag_name and tag_name.lower().startswith(
relationship_prefix.lower()
):
target = tag_name[len(relationship_prefix) :].strip()
relationship_map.setdefault(target, []).append(
{
"model_id": model_id,
"model_name": model_name,
"all_tags": [t.get("name", t) for t in tags if t],
"capabilities": meta.get("capabilities", {}),
}
)
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {
"description": f"Found {len(relationship_map)} relationship(s)",
"done": True,
},
}
)
if not relationship_map:
return f"❌ No '{relationship_prefix}' relationships found"
result = f"🔍 Relationship Dump: '{relationship_prefix}'\n{'='*60}\n\n"
for target, sources in sorted(relationship_map.items()):
result += f"📌 Target: **{target}** ({len(sources)} source(s))\n"
for src in sources:
result += f"{src['model_name']} (`{src['model_id']}`)\n"
result += f" Tags: {', '.join(src['all_tags']) or 'none'}\n"
result += f" Caps: {', '.join(f'{k}:{v}' for k,v in src['capabilities'].items()) or 'none'}\n"
result += "\n"
return result
async def add_capability_tag(
self,
model_id: str,
capability: str,
__request__=None,
__event_emitter__: Callable[[dict], Any] = None,
    ) -> str:
"""Add a capability tag (cap:) to a model."""
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {
"description": f"Adding capability tag to {model_id}...",
"done": False,
},
}
)
# Get current model data
status, model_data = await self._api_request(
"GET", f"/api/v1/models/model?id={model_id}", __request__
)
if status != 200:
return f"❌ Could not get model: {model_data}"
meta = model_data.get("meta", {})
tags = meta.get("tags", [])
# Add new capability tag
new_tag = f"{self.valves.TAG_PREFIX_CAPABILITY}{capability}"
        if new_tag not in [t.get("name", "") if isinstance(t, dict) else t for t in tags]:
tags.append({"name": new_tag})
# Update model
update_payload = {"id": model_id, "meta": {**meta, "tags": tags}}
status, response = await self._api_request(
"POST", "/api/v1/models/model/update", __request__, json_data=update_payload
)
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {"description": "Tag added successfully", "done": True},
}
)
if status == 200:
return f"✅ Added capability tag '{new_tag}' to {model_id}"
return f"❌ Failed to update model: {response}"
async def add_relationship_tag(
self,
model_id: str,
relationship: str,
target_model: str,
__request__=None,
__event_emitter__: Callable[[dict], Any] = None,
) -> str:
"""Add a relationship tag (rel:) to a model."""
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {
"description": f"Adding relationship tag to {model_id}...",
"done": False,
},
}
)
# Get current model data
status, model_data = await self._api_request(
"GET", f"/api/v1/models/model?id={model_id}", __request__
)
if status != 200:
return f"❌ Could not get model: {model_data}"
meta = model_data.get("meta", {})
tags = meta.get("tags", [])
# Add new relationship tag
new_tag = f"{self.valves.TAG_PREFIX_RELATIONSHIP}{relationship}:{target_model}"
        if new_tag not in [t.get("name", "") if isinstance(t, dict) else t for t in tags]:
tags.append({"name": new_tag})
# Update model
update_payload = {"id": model_id, "meta": {**meta, "tags": tags}}
status, response = await self._api_request(
"POST", "/api/v1/models/model/update", __request__, json_data=update_payload
)
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {"description": "Relationship tag added", "done": True},
}
)
if status == 200:
return f"✅ Added relationship tag '{new_tag}' to {model_id}"
return f"❌ Failed to update model: {response}"
async def dump_tag_taxonomy(self, __request__=None) -> str:
"""Dump complete tag taxonomy showing cap: and rel: distributions."""
status, data = await self._api_request("GET", "/api/v1/models/", __request__)
if status != 200:
return f"❌ Failed: HTTP {status}"
models = (
data
if isinstance(data, list)
else data.get("items", []) if isinstance(data, dict) else []
)
capability_tags = {}
relationship_tags = {}
all_capabilities = {}
for model in models:
if not isinstance(model, dict):
continue
model_id = model.get("id")
model_name = model.get("name", model_id)
meta = model.get("meta", {})
tags = meta.get("tags", [])
capabilities = meta.get("capabilities", {})
# Aggregate capabilities
for cap, val in capabilities.items():
all_capabilities.setdefault(cap, []).append(f"{model_name}={val}")
# Categorize tags
for tag in tags:
tag_name = tag.get("name") if isinstance(tag, dict) else tag
if not tag_name:
continue
if tag_name.startswith(self.valves.TAG_PREFIX_CAPABILITY):
capability_tags.setdefault(tag_name, []).append(model_name)
elif tag_name.startswith(self.valves.TAG_PREFIX_RELATIONSHIP):
relationship_tags.setdefault(tag_name, []).append(model_name)
result = "📊 Tag Taxonomy Dump\n" + "=" * 60 + "\n\n"
if capability_tags:
result += "🎯 CAPABILITY TAGS:\n"
for cap, models in sorted(capability_tags.items()):
result += f" {cap}: {len(models)} model(s)\n"
for m in models[:3]:
result += f"{m}\n"
if len(models) > 3:
result += f" ... and {len(models)-3} more\n"
result += "\n"
if relationship_tags:
result += "🔗 RELATIONSHIP TAGS:\n"
for rel, models in sorted(relationship_tags.items()):
result += f" {rel}: {len(models)} model(s)\n"
for m in models[:3]:
result += f"{m}\n"
if len(models) > 3:
result += f" ... and {len(models)-3} more\n"
result += "\n"
if all_capabilities:
result += "🧠 CAPABILITIES:\n"
for cap, instances in sorted(all_capabilities.items()):
result += f" {cap}: {len(instances)} value(s)\n"
for inst in instances[:3]:
result += f"{inst}\n"
if len(instances) > 3:
result += f" ... and {len(instances)-3} more\n"
return result or "❌ No tags found"
async def chat_with_model(
self,
chat_id: str,
model_identifier: Optional[str],
message: str,
__request__=None,
__event_emitter__: Callable[[dict], Any] = None,
) -> str:
"""🚀 PRIMARY TOOL: Robust multi-turn chat with proper message threading."""
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {"description": "Resolving model...", "done": False},
}
)
# NEW CHAT: model_identifier is REQUIRED
if chat_id == "new":
if not model_identifier:
return "❌ chat_with_model FAILED: model_identifier required when creating new chat"
# Resolve model_identifier to actual model_id
target_model = None
model_name = model_identifier
# Try tag first (if no slash in identifier)
if "/" not in model_identifier:
status, tag_data = await self._api_request(
"GET",
f"/api/v1/models/list?tag={model_identifier}&page=1",
__request__,
)
if status == 200:
items = (
tag_data.get("items", [])
if isinstance(tag_data, dict)
else tag_data
)
if items and isinstance(items, list) and len(items) > 0:
target_model = items[0].get("id")
model_name = items[0].get("name", target_model)
# Fallback to direct model ID
if not target_model:
target_model = model_identifier
# Get friendly name
status, model_info = await self._api_request(
"GET", f"/api/v1/models/{model_identifier}", __request__
)
if status == 200 and isinstance(model_info, dict):
model_name = model_info.get("name", model_identifier)
else:
model_name = model_identifier.replace("-", " ").title()
if not target_model:
return "❌ chat_with_model FAILED: Could not resolve target model"
# Create new chat
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {"description": "Creating chat...", "done": False},
}
)
payload = {
"chat": {
"title": f"Orchestrated: {model_name}",
"models": [target_model],
"params": {},
"history": {"messages": {}, "currentId": None},
"messages": [],
"tags": ["orchestrated"],
"files": [],
}
}
status, chat_data = await self._api_request(
"POST", "/api/v1/chats/new", __request__, json_data=payload
)
if status != 200:
return f"❌ chat_with_model FAILED\nCould not create chat: {chat_data}"
chat_id = chat_data.get("id")
if not chat_id:
return "❌ chat_with_model FAILED: No chat ID returned"
# EXISTING CHAT: model_identifier is OPTIONAL
else:
# Verify chat exists
status, chat_data = await self._api_request(
"GET", f"/api/v1/chats/{chat_id}", __request__
)
if status != 200:
return f"❌ chat_with_model FAILED\nCould not get chat: {chat_data}"
chat_obj = chat_data.get("chat", {})
existing_models = chat_obj.get("models", [])
if model_identifier:
# Use provided model
target_model = model_identifier
model_name = model_identifier.replace("-", " ").title()
elif existing_models and len(existing_models) > 0:
# Auto-detect from chat
target_model = existing_models[0]
status, model_info = await self._api_request(
"GET", f"/api/v1/models/{target_model}", __request__
)
if status == 200 and isinstance(model_info, dict):
model_name = model_info.get("name", target_model)
else:
model_name = target_model.replace("-", " ").title()
else:
return "❌ chat_with_model FAILED: No model found and no model_identifier provided"
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {
"description": f"Chatting with {model_name}...",
"done": False,
},
}
)
# Build conversation history WITH PROPER THREADING
status, chat_data = await self._api_request(
"GET", f"/api/v1/chats/{chat_id}", __request__
)
if status != 200:
return f"❌ Could not retrieve chat history: {chat_data}"
chat_obj = chat_data.get("chat", {})
existing_messages = chat_obj.get("messages", [])
completion_messages = []
for msg in existing_messages:
if msg.get("role") in ["user", "assistant"]:
completion_messages.append(
{"role": msg["role"], "content": msg["content"]}
)
# 🔑 KEY ENHANCEMENT: Enhanced chat_id injection for multi-turn awareness
enhanced_message = f"{message}\n\n[System: This is part of chat {chat_id}. You may respond here.]"
completion_messages.append({"role": "user", "content": enhanced_message})
# Send to target model
status, completion_data = await self._api_request(
"POST",
"/api/v1/chat/completions",
__request__,
json_data={
"model": target_model,
"stream": False,
"messages": completion_messages,
"chat_id": chat_id,
},
)
if status != 200:
return f"❌ chat_with_model FAILED\nCompletion error: {completion_data}"
try:
response_text = completion_data["choices"][0]["message"]["content"]
except (KeyError, IndexError, TypeError) as e:
return f"❌ Parse error: {e}"
# Persist with proper message threading
updated_chat = self._build_message_structure(
existing_messages, enhanced_message, response_text, target_model, model_name
)
updated_chat["title"] = chat_obj.get("title", f"Chat with {model_name}")
status, _ = await self._api_request(
"POST",
f"/api/v1/chats/{chat_id}",
__request__,
json_data={"chat": updated_chat},
)
if __event_emitter__:
await __event_emitter__(
{
"type": "status",
"data": {"description": "✅ Response persisted", "done": True},
}
)
return f"""✅ chat_with_model SUCCESS
═══════════════════════════════════════════════════════════
Model: {model_name} ({target_model})
Chat ID: {chat_id}
═══════════════════════════════════════════════════════════
Your message: {message}
═══════════════════════════════════════════════════════════
Response (persisted): {response_text}
═══════════════════════════════════════════════════════════
✅ This multi-turn response is now in chat history
"""
def _build_message_structure(
self,
existing_messages: List[Dict],
user_content: str,
assistant_content: str,
model: str,
model_name: str = None,
) -> Dict:
"""Build chat structure with proper message linking for multi-turn."""
if not model_name:
model_name = model.replace("-", " ").title()
timestamp = int(time.time())
user_uuid = str(uuid.uuid4())
asst_uuid = str(uuid.uuid4())
# Find last message ID for proper threading
last_msg_id = existing_messages[-1].get("id") if existing_messages else None
# Build user message
user_msg = {
"id": user_uuid,
"parentId": last_msg_id,
"childrenIds": [asst_uuid],
"role": "user",
"content": user_content,
"timestamp": timestamp,
"models": [model],
}
# Build assistant message
asst_msg = {
"id": asst_uuid,
"parentId": user_uuid,
"childrenIds": [],
"role": "assistant",
"content": assistant_content,
"model": model,
"modelName": model_name,
"modelIdx": 0,
"timestamp": timestamp,
"done": True,
}
# Update parent's children if exists
if last_msg_id:
for msg in existing_messages:
if msg.get("id") == last_msg_id:
if "childrenIds" not in msg:
msg["childrenIds"] = []
msg["childrenIds"].append(user_uuid)
break
all_messages = existing_messages + [user_msg, asst_msg]
history_messages = {msg["id"]: msg for msg in all_messages}
return {
"models": [model],
"params": {},
"history": {"messages": history_messages, "currentId": asst_uuid},
"messages": all_messages,
"tags": ["orchestrated"],
"files": [],
}
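    # Illustrative result of the linkage above (hypothetical IDs): an existing tail message M
    # gains U in childrenIds; the new user message U has parentId=M and childrenIds=[A]; the
    # new assistant message A has parentId=U; and history.currentId points at A, which is the
    # parent-child threading Open WebUI expects for multi-turn chats.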


@@ -15,12 +15,11 @@ description: |
and attach images to chat via event emitter for inline display.
Re-entrant safe: Multiple concurrent calls accumulate images correctly.
v1.7.0: Added VeniceImage namespace class for helper functions to avoid
method collisions with Open WebUI framework introspection.
v1.6.0: Added UserValves for SAFE_MODE and HIDE_WATERMARK with proper
admin/user override logic.
changelog:
1.7.1:
- changed return type from string to dictionary, mirroring the default tools behavior
- fixed issues with user valve overrides - Watermark and Safe Mode
- status message displays either [SFW] or [NSFW] based on the flag, not the content
1.7.0:
- Added VeniceImage namespace class for helper functions
- Moved get_api_key, parse_venice_image_response to VeniceImage namespace
@@ -96,7 +95,7 @@ class Tools:
class UserValves(BaseModel):
VENICE_API_KEY: str = Field(default="", description="Your Venice.ai API key (overrides admin)")
SAFE_MODE: bool = Field(default=False, description="Enable SFW content filtering")
SAFE_MODE: bool = Field(default=True, description="Enable SFW content filtering")
HIDE_WATERMARK: bool = Field(default=False, description="Hide Venice.ai watermark")
DEFAULT_MODEL: str = Field(default="", description="Your preferred image model")
DEFAULT_NEGATIVE_PROMPT: str = Field(default="", description="Default negative prompt")
@@ -111,11 +110,21 @@ class Tools:
self._lock_init = threading.Lock()
self._last_cleanup: float = 0.0
    def _is_safe_mode_enabled(self) -> bool:
        return self.valves.SAFE_MODE or self.user_valves.SAFE_MODE
    def _is_watermark_hidden(self) -> bool:
        return self.valves.HIDE_WATERMARK or self.user_valves.HIDE_WATERMARK
    def _is_safe_mode_enabled(self, __user__: dict = None) -> bool:
        user_safe_mode = self.user_valves.SAFE_MODE
        if __user__ and "valves" in __user__:
            user_safe_mode = __user__["valves"].SAFE_MODE
        return self.valves.SAFE_MODE or user_safe_mode
    def _is_watermark_hidden(self, __user__: dict = None) -> bool:
        user_hide_watermark = self.user_valves.HIDE_WATERMARK
        if __user__ and "valves" in __user__:
            user_hide_watermark = __user__["valves"].HIDE_WATERMARK
        return self.valves.HIDE_WATERMARK or user_hide_watermark
def _get_default_model(self) -> str:
return self.user_valves.DEFAULT_MODEL or self.valves.DEFAULT_MODEL
@@ -152,20 +161,29 @@ class Tools:
async def _accumulate_files(self, key: str, new_files: List[dict], __event_emitter__: Callable[[dict], Any] = None):
all_files = []
async with self._get_lock():
if key not in self._message_files:
self._message_files[key] = {"files": [], "timestamp": time.time()}
for f in new_files:
self._message_files[key]["files"].append(dict(f))
self._message_files[key]["timestamp"] = time.time()
all_files = list(self._message_files[key]["files"])
now = time.time()
if now - self._last_cleanup > 60:
self._last_cleanup = now
ttl = self.valves.ACCUMULATOR_TTL
expired = [k for k, v in self._message_files.items() if now - v.get("timestamp", 0) > ttl]
for k in expired:
del self._message_files[k]
if all_files and __event_emitter__:
await __event_emitter__({"type": "files", "data": {"files": all_files}})
@@ -290,38 +308,73 @@ class Tools:
except Exception as e:
return None, f"Fetch error: {type(e).__name__}: {e}"
async def generate_image(self, prompt: str, model: Optional[str] = None, width: int = 1024, height: int = 1024, negative_prompt: Optional[str] = None, style_preset: Optional[str] = None, variants: int = 1, __request__=None, __user__: dict = None, __metadata__: dict = None, __event_emitter__: Callable[[dict], Any] = None) -> str:
async def generate_image(
self,
prompt: str,
model: Optional[str] = None,
width: int = 1024,
height: int = 1024,
negative_prompt: Optional[str] = None,
style_preset: Optional[str] = None,
variants: int = 1,
__request__=None,
__user__: dict = None,
__metadata__: dict = None,
__event_emitter__: Callable[[dict], Any] = None
) -> dict:
retVal = {
"status": "failed",
"message": "",
"settings": {},
"images": [],
}
venice_key = VeniceImage.get_api_key(self.valves, self.user_valves, __user__)
if not venice_key:
return "Generate Image\nStatus: 0\nError: Venice.ai API key not configured."
retVal["message"] = "Error: Venice.ai API key not configured",
return retVal
if not prompt or not prompt.strip():
return "Generate Image\nStatus: 0\nError: Prompt is required"
retVal["message"] = "Error: Prompt is required",
return retVal
msg_key = self._get_message_key(__metadata__)
user_id = __user__.get("id", "default") if __user__ else "default"
cooldown = self.valves.COOLDOWN_SECONDS
if cooldown > 0:
now = time.time()
last_gen = self._cooldowns.get(user_id, 0)
is_reentrant = self._get_accumulated_count(msg_key) > 0
if not is_reentrant and now - last_gen < cooldown:
remaining = cooldown - (now - last_gen)
return f"Generate Image\nStatus: 429\nError: Rate limited. Wait {remaining:.1f}s."
retVal["message"] = "Error: Rate limited. Wait {remaining:.1f}s.",
return retVal
self._cooldowns[user_id] = now
model = model or self._get_default_model()
safe_mode = self._is_safe_mode_enabled()
hide_watermark = self._is_watermark_hidden()
safe_mode = self._is_safe_mode_enabled(__user__)
hide_watermark = self._is_watermark_hidden(__user__)
effective_negative_prompt = negative_prompt or self._get_default_negative_prompt()
variants = max(1, min(4, variants))
width = max(512, min(1280, width))
height = max(512, min(1280, height))
existing_count = self._get_accumulated_count(msg_key)
if __event_emitter__:
status_msg = f"Generating {variants} image{'s' if variants > 1 else ''} with {model}"
if existing_count > 0:
status_msg += f" (adding to {existing_count} existing)"
if safe_mode:
status_msg += " [SFW]"
else:
status_msg += " [NSFW]"
await __event_emitter__({"type": "status", "data": {"description": f"{status_msg}...", "done": False}})
payload = {"model": model, "prompt": prompt, "width": width, "height": height, "safe_mode": safe_mode, "hide_watermark": hide_watermark, "return_binary": False, "variants": variants}
if effective_negative_prompt:
payload["negative_prompt"] = effective_negative_prompt
@@ -329,6 +382,7 @@ class Tools:
payload["style_preset"] = style_preset
retried = False
dropped_params = []
try:
async with httpx.AsyncClient(timeout=float(self.valves.GENERATION_TIMEOUT)) as client:
response = await client.post("https://api.venice.ai/api/v1/image/generate", headers={"Authorization": f"Bearer {venice_key}", "Content-Type": "application/json"}, json=payload)
@@ -349,65 +403,86 @@ class Tools:
except httpx.HTTPStatusError as e:
if __event_emitter__:
await __event_emitter__({"type": "status", "data": {"done": True}})
return f"Generate Image\nStatus: {e.response.status_code}\nError: {e.response.text[:200]}"
retVal["message"] = f"Status: {e.response.status_code} Error: {e.response.text[:200]}",
return retVal["message"]
except httpx.TimeoutException:
if __event_emitter__:
await __event_emitter__({"type": "status", "data": {"done": True}})
return f"Generate Image\nStatus: 408\nError: Timed out after {self.valves.GENERATION_TIMEOUT}s"
retVal["message"] = f"Status: 408\nError: Timed out after {self.valves.GENERATION_TIMEOUT}s",
return retVal
except Exception as e:
if __event_emitter__:
await __event_emitter__({"type": "status", "data": {"done": True}})
return f"Generate Image\nStatus: 0\nError: {type(e).__name__}: {e}"
retVal["message"] = f"Status: 0\nError: {type(e).__name__}: {e}",
return retVal
images = result.get("images", [])
if not images:
if __event_emitter__:
await __event_emitter__({"type": "status", "data": {"done": True}})
return "Generate Image\nStatus: 0\nError: No images returned"
retVal["message"] = f"Status: 0\nError: No images returned",
return retVal
if __event_emitter__:
await __event_emitter__({"type": "status", "data": {"description": f"Uploading {len(images)} images...", "done": False}})
chat_id = __metadata__.get("chat_id") if __metadata__ else None
message_id = __metadata__.get("message_id") if __metadata__ else None
uploaded_files = []
errors = []
for i, image_b64 in enumerate(images):
timestamp = int(time.time() * 1000)
filename = f"venice_{model}_{timestamp}_{i+1}.webp"
file_metadata = {"name": filename, "content_type": "image/webp", "data": {"model": model, "prompt": prompt, "negative_prompt": effective_negative_prompt, "style_preset": style_preset, "width": width, "height": height, "variant": i+1, "total_variants": len(images), "safe_mode": safe_mode, "hide_watermark": hide_watermark}}
if chat_id:
file_metadata["chat_id"] = chat_id
if message_id:
file_metadata["message_id"] = message_id
file_id, error = await self._upload_image(image_b64, filename, file_metadata, "image/webp", __request__)
if file_id:
uploaded_files.append({"type": "image", "url": f"/api/v1/files/{file_id}/content"})
else:
errors.append(f"Variant {i+1}: {error}")
if uploaded_files:
await self._accumulate_files(msg_key, uploaded_files, __event_emitter__)
final_count = self._get_accumulated_count(msg_key)
if __event_emitter__:
await __event_emitter__({"type": "status", "data": {"description": f"Done ({final_count} images total)", "done": True}})
parts = ["Generate Image", "Status: 200", "", f"Generated {len(uploaded_files)} image(s) for: {prompt[:100]}{'...' if len(prompt) > 100 else ''}", f"Model: {model} | Size: {width}x{height}"]
settings_parts = []
retVal["status"] = "success"
retVal["message"] = "The image has been successfully generated and is already visible to the user in the chat. You do not need to display or embed the image again - just acknowledge that it has been created.",
if safe_mode:
settings_parts.append("SFW")
retVal["settings"]["safe_mode"]: "SFW"
else:
retVal["settings"]["safe_mode"]: "NSFW"
if hide_watermark:
settings_parts.append("No watermark")
if settings_parts:
parts.append(f"Settings: {', '.join(settings_parts)}")
retVal["settings"]["hide_watermark"]: "hide_watermark"
if uploaded_files:
parts.append("", "Files:")
for i, f in enumerate(uploaded_files):
parts.append(f" [{i+1}] {f['url']}")
retVal["images"] = uploaded_files
if dropped_params:
parts.append(f"Note: {model} doesn't support: {', '.join(dropped_params)} (ignored)")
if final_count > len(uploaded_files):
parts.append(f"Total images in message: {final_count}")
retVal["note"] = f" {model} doesn't support: {', '.join(dropped_params)} (ignored)"
if errors:
parts.append("", "Warnings:")
for e in errors:
parts.append(f" - {e}")
return "\n".join(parts)
retVal["warnings"] = errors
return retVal
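    # Illustrative shape of the dict generate_image returns on success (values assumed):
    # {"status": "success", "message": "The image has been successfully generated...",
    #  "settings": {"safe_mode": "SFW", "hide_watermark": "hide_watermark"},
    #  "images": [{"type": "image", "url": "/api/v1/files/<file_id>/content"}],
    #  "warnings": []}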
async def upscale_image(self, image: str, scale: int = 2, enhance: bool = False, enhance_creativity: float = 0.5, enhance_prompt: Optional[str] = None, __request__=None, __user__: dict = None, __metadata__: dict = None, __files__: list = None, __event_emitter__: Callable[[dict], Any] = None) -> str:
venice_key = VeniceImage.get_api_key(self.valves, self.user_valves, __user__)