4 Commits

Author SHA1 Message Date
889680a7b7 feat(gitea): implement gitea_coder role with scope enforcement (#20)
## Summary

Implements the gitea_coder role as defined in issue #11, providing a complete workflow automation layer for Git operations with scope enforcement.

## Features

### Branch Management with Scope Gating
-  Enforces branch naming conventions (feature/, fix/, refactor/, docs/, test/, chore/)
-  Prevents direct pushes to protected branches (main, master, develop, dev)
-  Auto-appends issue numbers to branch names
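A rough illustration of this gate (a minimal sketch; the constant and function names below are hypothetical, not the actual `coder.py` API):

```python
# Hypothetical sketch of the scope gate described above; not the real coder.py code.
from typing import Optional

ALLOWED_PREFIXES = ("feature/", "fix/", "refactor/", "docs/", "test/", "chore/")
PROTECTED_BRANCHES = {"main", "master", "develop", "dev"}

def validate_branch_name(name: str, issue_number: Optional[int] = None) -> str:
    """Reject protected targets, enforce the prefix convention, and append the issue number."""
    if name in PROTECTED_BRANCHES:
        raise ValueError(f"Refusing to work directly on protected branch '{name}'")
    if not name.startswith(ALLOWED_PREFIXES):
        raise ValueError(f"Branch name must start with one of: {', '.join(ALLOWED_PREFIXES)}")
    if issue_number is not None and not name.endswith(str(issue_number)):
        name = f"{name}-{issue_number}"
    return name
```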

### Unified Commit Workflow
-  Automatic create vs replace detection
-  Conventional commits format with issue references
-  Detailed commit message generation
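In practice the create-vs-replace decision reduces to probing whether the path already exists before choosing the write call. A hedged sketch of that flow, assuming the tool reports a missing file as an `Error:` string and accepting that `get_file_contents` and the keyword arguments are illustrative names rather than the real `dev.py` signatures:

```python
# Illustrative only: get_file_contents and the message=/branch= kwargs are assumptions;
# create_file and replace_file mirror the dev.py operations referenced in this PR.
async def commit_file(tools, path: str, content: str, message: str, branch: str) -> str:
    existing = await tools.get_file_contents(path, branch=branch)
    file_missing = isinstance(existing, str) and existing.startswith("Error:")
    if file_missing:
        return await tools.create_file(path, content, message=message, branch=branch)
    return await tools.replace_file(path, content, message=message, branch=branch)
```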

### PR Creation
-  Validates source branch is not protected
-  Auto-references issues in PR description
-  Uses existing gitea/dev.py operations

### Ticket Integration
-  Reads and parses issue requirements
-  Extracts testing criteria and technical notes
-  Suggests branch names from issue content
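For example, deriving a branch-name suggestion from an issue title could look roughly like this (a sketch under assumed conventions; `suggest_branch_name` is illustrative, not the actual parser):

```python
import re

def suggest_branch_name(issue_number: int, title: str) -> str:
    """Derive a scoped branch name such as 'feature/implement-gitea-coder-role-11'."""
    prefix = "fix/" if re.search(r"\b(bug|fix|error)\b", title, re.I) else "feature/"
    slug = re.sub(r"[^a-z0-9]+", "-", title.lower()).strip("-")[:40]
    return f"{prefix}{slug}-{issue_number}"
```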

## Files Added
- `gitea/coder.py` - Complete gitea_coder role implementation

## Files Modified
- `README.md` - Added gitea_coder documentation

## Testing Criteria
- Can create feature branch from ticket
- Can modify files according to ticket requirements
- Can generate commit messages with issue references
- Can create PR for review

Refs: #11
Reviewed-on: #20
2026-01-18 22:24:53 +00:00
3d8a8190f9 return type fixes (#9)
Reviewed-on: #9
Co-authored-by: Jeffrey Smith <jasafpro@gmail.com>
Co-committed-by: Jeffrey Smith <jasafpro@gmail.com>
2026-01-17 12:31:59 +00:00
dc9f128eb9 Update gitea/dev.py (#8)
Reviewed-on: #8
2026-01-15 20:43:12 +00:00
7f35b8fac4 fix(dev.py): fix CRUD operation bugs
- Fixed redundant __user__ checks in _get_token, _get_repo, _get_branch, _get_org
- Fixed merge_pull_request: proper conflict detection (409), merged status check, and empty response handling
- Fixed update_file: proper 404 handling before raise_for_status
- Fixed delete_file: proper 404 handling before raise_for_status
- Updated version to 1.4.1 with changelog
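
The "404 handling before raise_for_status" fixes follow a common httpx pattern: check for the expected failure status first and report it, then let any other error status raise. A simplified sketch of that pattern (illustrative, not the exact dev.py code):

```python
import httpx

async def delete_path(client: httpx.AsyncClient, url: str) -> str:
    response = await client.delete(url)
    if response.status_code == 404:      # expected case: report it instead of raising
        return "Error: File not found"
    response.raise_for_status()          # any other error status still raises
    return "File deleted"
```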

Refs: bug hunt fix
2026-01-15 17:43:06 +00:00
4 changed files with 3054 additions and 57 deletions

README.md

@@ -8,6 +8,7 @@ This monorepo contains a collection of automation tools for Open WebUI, designed
 Python scripts for Git operations and repository management:
 - **`admin.py`**: Administrative utilities for managing Gitea repositories, potentially including user management, permissions, and batch operations.
 - **`dev.py`**: Development-focused tools for Git workflows, branch handling, and repository interactions tailored for software development processes.
+- **`coder.py`**: Development workflow role that reads tickets, creates branches with scope enforcement, generates commit messages with issue references, and creates pull requests.
 ### venice/
 Tools for interacting with Venice AI services:

gitea/coder.py (new file, 2,889 additions; diff suppressed because it is too large)

gitea/dev.py

@@ -1,11 +1,18 @@
 """
 title: Gitea Dev - Native Mode Optimized
 author: Jeff Smith + Claude + minimax + kimi-k2
-version: 1.4.0
+version: 1.4.1
 license: MIT
 description: Interact with Gitea repositories - native tool calling optimized for high-tier LLMs with robust error handling
 requirements: pydantic, httpx
 changelog:
+    1.4.2:
+        - Renamed update_file to replace_file, since "update" implied a diff-based edit
+    1.4.1:
+        - Fixed redundant __user__ checks in _get_token, _get_repo, _get_branch, _get_org
+        - Fixed merge_pull_request: proper conflict detection, merged status check, and empty response handling
+        - Fixed delete_file: proper 404 handling before raise_for_status
+        - Fixed update_file: proper 404 handling before raise_for_status
     1.4.0:
         - Added CRUD operations for Issues (get, update, close, reopen, delete, comments)
        - Added CRUD operations for Pull Requests (get, update, merge, comments)
@@ -102,8 +109,9 @@ class Tools:
     def _get_token(self, __user__: dict = None) -> str:
         """Extract Gitea token from user context with robust handling"""
         if __user__ and "valves" in __user__:
-            user_valves = __user__.get("valves") if __user__ else None
-            return user_valves.GITEA_TOKEN
+            user_valves = __user__.get("valves")
+            if user_valves:
+                return user_valves.GITEA_TOKEN
         return ""

     def _headers(self, __user__: dict = None) -> dict:
@@ -132,9 +140,10 @@ class Tools:
         if repo:
             return repo
         if __user__ and "valves" in __user__:
-            user_valves = __user__.get("valves") if __user__ else None
-            if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_REPO:
-                return user_valves.USER_DEFAULT_REPO
+            user_valves = __user__.get("valves")
+            if user_valves:
+                if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_REPO:
+                    return user_valves.USER_DEFAULT_REPO
         return self.valves.DEFAULT_REPO

     def _get_branch(self, branch: Optional[str], __user__: dict = None) -> str:
@@ -142,9 +151,10 @@ class Tools:
         if branch:
             return branch
         if __user__ and "valves" in __user__:
-            user_valves = __user__.get("valves") if __user__ else None
-            if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_BRANCH:
-                return user_valves.USER_DEFAULT_BRANCH
+            user_valves = __user__.get("valves")
+            if user_valves:
+                if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_BRANCH:
+                    return user_valves.USER_DEFAULT_BRANCH
         return self.valves.DEFAULT_BRANCH

     def _get_org(self, org: Optional[str], __user__: dict = None) -> str:
@@ -152,9 +162,10 @@ class Tools:
         if org:
             return org
         if __user__ and "valves" in __user__:
-            user_valves = __user__.get("valves") if __user__ else None
-            if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_ORG:
-                return user_valves.USER_DEFAULT_ORG
+            user_valves = __user__.get("valves")
+            if user_valves:
+                if self.valves.ALLOW_USER_OVERRIDES and user_valves.USER_DEFAULT_ORG:
+                    return user_valves.USER_DEFAULT_ORG
         return self.valves.DEFAULT_ORG

     def _resolve_repo(
@@ -546,7 +557,7 @@ class Tools:
                 f"Error: Unexpected failure during file fetch: {type(e).__name__}: {e}"
             )

-    async def update_file(
+    async def replace_file(
         self,
         path: str,
         content: str,
@@ -729,7 +740,7 @@
         except httpx.HTTPStatusError as e:
             error_msg = self._format_error(e, f"file creation for '{path}'")
             if e.response.status_code == 422:
-                return f"Error: File already exists: `{path}`. Use `update_file()` to modify it instead."
+                return f"Error: File already exists: `{path}`. Use `replace_file()` to modify it instead."
             return f"Error: Failed to create file. {error_msg}"
         except Exception as e:
             return f"Error: Unexpected failure during file creation: {type(e).__name__}: {e}"
@@ -2419,14 +2430,33 @@
                 json={"merge_strategy": merge_strategy},
             )

+            # Check for conflict before raise_for_status
+            if response.status_code == 409:
+                try:
+                    error_data = response.json()
+                    error_msg = error_data.get("message", "Merge conflicts detected")
+                except Exception:
+                    error_msg = "Merge conflicts detected"
+                return f"Error: PR #{pr_number} cannot be merged due to conflicts.\n\nDetails: {error_msg}"
+
             if response.status_code == 405:
-                return "Error: PR cannot be merged. Check if it's already merged or has conflicts."
+                try:
+                    error_data = response.json()
+                    error_msg = error_data.get("message", "PR cannot be merged")
+                except Exception:
+                    error_msg = "PR cannot be merged"
+                return f"Error: PR #{pr_number} cannot be merged. {error_msg}"

             response.raise_for_status()
-            result = response.json() if response.text else {}
-            merged = result.get("merged", True)
-            commit_sha = result.get("merge_commit", {}).get("sha", "")[:8]
+
+            # Handle successful merge response (may be empty or have merge details)
+            if not response.text:
+                merged = True
+                commit_sha = ""
+            else:
+                result = response.json() if response.text else {}
+                merged = result.get("merged", True)  # Default to True if key missing
+                commit_sha = result.get("merge_commit", {}).get("sha", "")[:8] if result else ""

             if __event_emitter__:
                 await __event_emitter__(
@@ -2446,10 +2476,12 @@
                     )
                 return output
             else:
-                return f"**PR #{pr_number} Merge Result:**\n\n{result}\n"
+                return f"**PR #{pr_number} Merge Result:**\n\nMerge operation returned success=false"

         except httpx.HTTPStatusError as e:
             error_msg = self._format_error(e, f"PR #{pr_number} merge")
+            if e.response.status_code == 409:
+                return f"Error: PR #{pr_number} cannot be merged due to conflicts."
             if e.response.status_code == 405:
                 return f"Error: PR #{pr_number} cannot be merged. It may already be merged or have merge conflicts."
             return f"Error: Failed to merge PR. {error_msg}"

venice/ image generation tool (file name not shown)

@@ -15,12 +15,11 @@ description: |
     and attach images to chat via event emitter for inline display.
     Re-entrant safe: Multiple concurrent calls accumulate images correctly.
-    v1.7.0: Added VeniceImage namespace class for helper functions to avoid
-    method collisions with Open WebUI framework introspection.
-    v1.6.0: Added UserValves for SAFE_MODE and HIDE_WATERMARK with proper
-    admin/user override logic.
 changelog:
+    1.7.1:
+        - changed return type from string to dictionary, mirroring the default tools behavior
+        - fixed issues with user valve overrides - Watermark and Safe Mode
+        - status message will display either [SFW] or [NSFW] depending on the flag, not the content
     1.7.0:
         - Added VeniceImage namespace class for helper functions
         - Moved get_api_key, parse_venice_image_response to VeniceImage namespace
@@ -96,7 +95,7 @@ class Tools:
     class UserValves(BaseModel):
         VENICE_API_KEY: str = Field(default="", description="Your Venice.ai API key (overrides admin)")
-        SAFE_MODE: bool = Field(default=False, description="Enable SFW content filtering")
+        SAFE_MODE: bool = Field(default=True, description="Enable SFW content filtering")
         HIDE_WATERMARK: bool = Field(default=False, description="Hide Venice.ai watermark")
         DEFAULT_MODEL: str = Field(default="", description="Your preferred image model")
         DEFAULT_NEGATIVE_PROMPT: str = Field(default="", description="Default negative prompt")
@@ -111,11 +110,21 @@ class Tools:
         self._lock_init = threading.Lock()
         self._last_cleanup: float = 0.0

-    def _is_safe_mode_enabled(self) -> bool:
-        return self.valves.SAFE_MODE or self.user_valves.SAFE_MODE
+    def _is_safe_mode_enabled(self, __user__: dict = None) -> bool:
+        user_safe_mode = self.user_valves.SAFE_MODE
+        if __user__ and "valves" in __user__:
+            user_safe_mode = __user__["valves"].SAFE_MODE
+        return self.valves.SAFE_MODE or user_safe_mode

-    def _is_watermark_hidden(self) -> bool:
-        return self.valves.HIDE_WATERMARK or self.user_valves.HIDE_WATERMARK
+    def _is_watermark_hidden(self, __user__: dict = None) -> bool:
+        user_hide_watermark = self.user_valves.HIDE_WATERMARK
+        if __user__ and "valves" in __user__:
+            user_hide_watermark = __user__["valves"].HIDE_WATERMARK
+        return self.valves.HIDE_WATERMARK or user_hide_watermark

     def _get_default_model(self) -> str:
         return self.user_valves.DEFAULT_MODEL or self.valves.DEFAULT_MODEL
@@ -152,20 +161,29 @@ class Tools:
     async def _accumulate_files(self, key: str, new_files: List[dict], __event_emitter__: Callable[[dict], Any] = None):
         all_files = []
         async with self._get_lock():
             if key not in self._message_files:
                 self._message_files[key] = {"files": [], "timestamp": time.time()}
             for f in new_files:
                 self._message_files[key]["files"].append(dict(f))
             self._message_files[key]["timestamp"] = time.time()
             all_files = list(self._message_files[key]["files"])
             now = time.time()
             if now - self._last_cleanup > 60:
                 self._last_cleanup = now
                 ttl = self.valves.ACCUMULATOR_TTL
                 expired = [k for k, v in self._message_files.items() if now - v.get("timestamp", 0) > ttl]
                 for k in expired:
                     del self._message_files[k]
         if all_files and __event_emitter__:
             await __event_emitter__({"type": "files", "data": {"files": all_files}})
@@ -290,38 +308,73 @@ class Tools:
         except Exception as e:
             return None, f"Fetch error: {type(e).__name__}: {e}"

-    async def generate_image(self, prompt: str, model: Optional[str] = None, width: int = 1024, height: int = 1024, negative_prompt: Optional[str] = None, style_preset: Optional[str] = None, variants: int = 1, __request__=None, __user__: dict = None, __metadata__: dict = None, __event_emitter__: Callable[[dict], Any] = None) -> str:
+    async def generate_image(
+        self,
+        prompt: str,
+        model: Optional[str] = None,
+        width: int = 1024,
+        height: int = 1024,
+        negative_prompt: Optional[str] = None,
+        style_preset: Optional[str] = None,
+        variants: int = 1,
+        __request__=None,
+        __user__: dict = None,
+        __metadata__: dict = None,
+        __event_emitter__: Callable[[dict], Any] = None
+    ) -> dict:
+        retVal = {
+            "status": "failed",
+            "message": "",
+            "settings": {},
+            "images": [],
+        }
         venice_key = VeniceImage.get_api_key(self.valves, self.user_valves, __user__)
         if not venice_key:
-            return "Generate Image\nStatus: 0\nError: Venice.ai API key not configured."
+            retVal["message"] = "Error: Venice.ai API key not configured"
+            return retVal
         if not prompt or not prompt.strip():
-            return "Generate Image\nStatus: 0\nError: Prompt is required"
+            retVal["message"] = "Error: Prompt is required"
+            return retVal
         msg_key = self._get_message_key(__metadata__)
         user_id = __user__.get("id", "default") if __user__ else "default"
         cooldown = self.valves.COOLDOWN_SECONDS
         if cooldown > 0:
             now = time.time()
             last_gen = self._cooldowns.get(user_id, 0)
             is_reentrant = self._get_accumulated_count(msg_key) > 0
             if not is_reentrant and now - last_gen < cooldown:
                 remaining = cooldown - (now - last_gen)
-                return f"Generate Image\nStatus: 429\nError: Rate limited. Wait {remaining:.1f}s."
+                retVal["message"] = f"Error: Rate limited. Wait {remaining:.1f}s."
+                return retVal
             self._cooldowns[user_id] = now
         model = model or self._get_default_model()
-        safe_mode = self._is_safe_mode_enabled()
-        hide_watermark = self._is_watermark_hidden()
+        safe_mode = self._is_safe_mode_enabled(__user__)
+        hide_watermark = self._is_watermark_hidden(__user__)
         effective_negative_prompt = negative_prompt or self._get_default_negative_prompt()
         variants = max(1, min(4, variants))
         width = max(512, min(1280, width))
         height = max(512, min(1280, height))
         existing_count = self._get_accumulated_count(msg_key)
         if __event_emitter__:
             status_msg = f"Generating {variants} image{'s' if variants > 1 else ''} with {model}"
             if existing_count > 0:
                 status_msg += f" (adding to {existing_count} existing)"
             if safe_mode:
                 status_msg += " [SFW]"
+            else:
+                status_msg += " [NSFW]"
             await __event_emitter__({"type": "status", "data": {"description": f"{status_msg}...", "done": False}})
         payload = {"model": model, "prompt": prompt, "width": width, "height": height, "safe_mode": safe_mode, "hide_watermark": hide_watermark, "return_binary": False, "variants": variants}
         if effective_negative_prompt:
             payload["negative_prompt"] = effective_negative_prompt
@@ -329,6 +382,7 @@ class Tools:
             payload["style_preset"] = style_preset
         retried = False
         dropped_params = []

         try:
             async with httpx.AsyncClient(timeout=float(self.valves.GENERATION_TIMEOUT)) as client:
                 response = await client.post("https://api.venice.ai/api/v1/image/generate", headers={"Authorization": f"Bearer {venice_key}", "Content-Type": "application/json"}, json=payload)
@@ -349,65 +403,86 @@ class Tools:
         except httpx.HTTPStatusError as e:
             if __event_emitter__:
                 await __event_emitter__({"type": "status", "data": {"done": True}})
-            return f"Generate Image\nStatus: {e.response.status_code}\nError: {e.response.text[:200]}"
+            retVal["message"] = f"Status: {e.response.status_code} Error: {e.response.text[:200]}"
+            return retVal
         except httpx.TimeoutException:
             if __event_emitter__:
                 await __event_emitter__({"type": "status", "data": {"done": True}})
-            return f"Generate Image\nStatus: 408\nError: Timed out after {self.valves.GENERATION_TIMEOUT}s"
+            retVal["message"] = f"Status: 408\nError: Timed out after {self.valves.GENERATION_TIMEOUT}s"
+            return retVal
         except Exception as e:
             if __event_emitter__:
                 await __event_emitter__({"type": "status", "data": {"done": True}})
-            return f"Generate Image\nStatus: 0\nError: {type(e).__name__}: {e}"
+            retVal["message"] = f"Status: 0\nError: {type(e).__name__}: {e}"
+            return retVal
         images = result.get("images", [])
         if not images:
             if __event_emitter__:
                 await __event_emitter__({"type": "status", "data": {"done": True}})
-            return "Generate Image\nStatus: 0\nError: No images returned"
+            retVal["message"] = "Status: 0\nError: No images returned"
+            return retVal
         if __event_emitter__:
             await __event_emitter__({"type": "status", "data": {"description": f"Uploading {len(images)} images...", "done": False}})
         chat_id = __metadata__.get("chat_id") if __metadata__ else None
         message_id = __metadata__.get("message_id") if __metadata__ else None
         uploaded_files = []
         errors = []
         for i, image_b64 in enumerate(images):
             timestamp = int(time.time() * 1000)
             filename = f"venice_{model}_{timestamp}_{i+1}.webp"
             file_metadata = {"name": filename, "content_type": "image/webp", "data": {"model": model, "prompt": prompt, "negative_prompt": effective_negative_prompt, "style_preset": style_preset, "width": width, "height": height, "variant": i+1, "total_variants": len(images), "safe_mode": safe_mode, "hide_watermark": hide_watermark}}
             if chat_id:
                 file_metadata["chat_id"] = chat_id
             if message_id:
                 file_metadata["message_id"] = message_id
             file_id, error = await self._upload_image(image_b64, filename, file_metadata, "image/webp", __request__)
             if file_id:
                 uploaded_files.append({"type": "image", "url": f"/api/v1/files/{file_id}/content"})
             else:
                 errors.append(f"Variant {i+1}: {error}")
         if uploaded_files:
             await self._accumulate_files(msg_key, uploaded_files, __event_emitter__)
         final_count = self._get_accumulated_count(msg_key)
         if __event_emitter__:
             await __event_emitter__({"type": "status", "data": {"description": f"Done ({final_count} images total)", "done": True}})
-        parts = ["Generate Image", "Status: 200", "", f"Generated {len(uploaded_files)} image(s) for: {prompt[:100]}{'...' if len(prompt) > 100 else ''}", f"Model: {model} | Size: {width}x{height}"]
-        settings_parts = []
+
+        retVal["status"] = "success"
+        retVal["message"] = "The image has been successfully generated and is already visible to the user in the chat. You do not need to display or embed the image again - just acknowledge that it has been created."
         if safe_mode:
-            settings_parts.append("SFW")
+            retVal["settings"]["safe_mode"] = "SFW"
+        else:
+            retVal["settings"]["safe_mode"] = "NSFW"
         if hide_watermark:
-            settings_parts.append("No watermark")
-        if settings_parts:
-            parts.append(f"Settings: {', '.join(settings_parts)}")
+            retVal["settings"]["hide_watermark"] = True
         if uploaded_files:
-            parts.append("", "Files:")
-            for i, f in enumerate(uploaded_files):
-                parts.append(f" [{i+1}] {f['url']}")
+            retVal["images"] = uploaded_files
         if dropped_params:
-            parts.append(f"Note: {model} doesn't support: {', '.join(dropped_params)} (ignored)")
-        if final_count > len(uploaded_files):
-            parts.append(f"Total images in message: {final_count}")
+            retVal["note"] = f"{model} doesn't support: {', '.join(dropped_params)} (ignored)"
         if errors:
-            parts.append("", "Warnings:")
-            for e in errors:
-                parts.append(f" - {e}")
-        return "\n".join(parts)
+            retVal["warnings"] = errors
+
+        return retVal

     async def upscale_image(self, image: str, scale: int = 2, enhance: bool = False, enhance_creativity: float = 0.5, enhance_prompt: Optional[str] = None, __request__=None, __user__: dict = None, __metadata__: dict = None, __files__: list = None, __event_emitter__: Callable[[dict], Any] = None) -> str:
         venice_key = VeniceImage.get_api_key(self.valves, self.user_valves, __user__)