Spaces:
Running
Running
Chandima Prabhath
commited on
Commit
·
3380d86
1
Parent(s):
40babb7
gemini flash image edit
Browse files- app.py +2 -2
- gemini_flash_lib/README.md +42 -0
- gemini_flash_lib/__init__.py +3 -0
- gemini_flash_lib/example_usage.py +19 -0
- gemini_flash_lib/image_generator.py +178 -0
- requirements.txt +2 -1
- telebot.py +0 -496
app.py
CHANGED
|
@@ -28,7 +28,7 @@ import uvicorn
|
|
| 28 |
from FLUX import generate_image
|
| 29 |
from VoiceReply import generate_voice_reply
|
| 30 |
from polLLM import generate_llm, LLMBadRequestError
|
| 31 |
-
import
|
| 32 |
|
| 33 |
# --- Configuration ---------------------------------------------------------
|
| 34 |
|
|
@@ -601,7 +601,7 @@ class WhatsAppBot:
|
|
| 601 |
else:
|
| 602 |
raise ValueError(f"Unknown source_type: {source_type}")
|
| 603 |
|
| 604 |
-
|
| 605 |
|
| 606 |
if os.path.exists(output_path):
|
| 607 |
caption = f"✨ Edited: {prompt}"
|
|
|
|
| 28 |
from FLUX import generate_image
|
| 29 |
from VoiceReply import generate_voice_reply
|
| 30 |
from polLLM import generate_llm, LLMBadRequestError
|
| 31 |
+
import gemini_flash_lib
|
| 32 |
|
| 33 |
# --- Configuration ---------------------------------------------------------
|
| 34 |
|
|
|
|
| 601 |
else:
|
| 602 |
raise ValueError(f"Unknown source_type: {source_type}")
|
| 603 |
|
| 604 |
+
gemini_flash_lib.generate_image(prompt, edit_input_path, download_path=output_path)
|
| 605 |
|
| 606 |
if os.path.exists(output_path):
|
| 607 |
caption = f"✨ Edited: {prompt}"
|
gemini_flash_lib/README.md
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Gemini Flash Image Generator Library
|
| 2 |
+
|
| 3 |
+
A Python library for editing images with Google's Gemini Flash image generation API.
|
| 4 |
+
|
| 5 |
+
## Installation
|
| 6 |
+
```bash
|
| 7 |
+
pip install requests
|
| 8 |
+
```
|
| 9 |
+
|
| 10 |
+
## Usage
|
| 11 |
+
```python
|
| 12 |
+
from gemini_flash_lib import generate_image
|
| 13 |
+
|
| 14 |
+
# Using file path
|
| 15 |
+
result = generate_image("close her eyes", "path/to/image.jpg")
|
| 16 |
+
|
| 17 |
+
# Using image bytes
|
| 18 |
+
with open("path/to/image.jpg", "rb") as f:
|
| 19 |
+
image_bytes = f.read()
|
| 20 |
+
result = generate_image("add sunglasses", image_bytes)
|
| 21 |
+
|
| 22 |
+
# Custom headers
|
| 23 |
+
custom_headers = {"Authorization": "Bearer YOUR_TOKEN"}
|
| 24 |
+
result = generate_image("make it sunny", "path/to/image.jpg", headers=custom_headers)
|
| 25 |
+
```
|
| 26 |
+
|
| 27 |
+
## Parameters
|
| 28 |
+
- `prompt_text` (str): Text prompt for image modification
|
| 29 |
+
- `image_input` (str or bytes): Image file path or bytes content
|
| 30 |
+
- `headers` (dict, optional): Custom request headers
|
| 31 |
+
|
| 32 |
+
## Returns
|
| 33 |
+
- dict: API response on success
|
| 34 |
+
- None: On request failure
|
| 35 |
+
|
| 36 |
+
## Error Handling
|
| 37 |
+
Raises:
|
| 38 |
+
- `FileNotFoundError`: If image file doesn't exist
|
| 39 |
+
- `ValueError`: For unsupported input types
|
| 40 |
+
|
| 41 |
+
## Example
|
| 42 |
+
See [example_usage.py](example_usage.py) for a complete usage example.
|
gemini_flash_lib/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .image_generator import generate_image
|
| 2 |
+
|
| 3 |
+
__all__ = ['generate_image']
|
gemini_flash_lib/example_usage.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flux_kontext_lib import generate_image
|
| 2 |
+
|
| 3 |
+
# Example usage
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
try:
|
| 6 |
+
# Replace with your actual image path
|
| 7 |
+
image_path = "./image.jpg"
|
| 8 |
+
prompt = "close his eyes"
|
| 9 |
+
|
| 10 |
+
# Call the library function
|
| 11 |
+
result = generate_image(prompt, image_path)
|
| 12 |
+
|
| 13 |
+
if result:
|
| 14 |
+
print("API Response:")
|
| 15 |
+
print(result)
|
| 16 |
+
else:
|
| 17 |
+
print("Request failed. Check error messages for details.")
|
| 18 |
+
except Exception as e:
|
| 19 |
+
print(f"Error: {e}")
|
gemini_flash_lib/image_generator.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import base64
import json
import os
from io import BytesIO
from typing import Optional, Dict, Any

# --- Third-party dependencies ----------------------------------------------
# google-genai provides the Gemini client; Pillow handles image decoding.
try:
    from google import genai
    from google.genai.types import RawReferenceImage
    from google.genai import types
except ImportError as _err:
    # Fix: a library module must never print-and-exit() at import time —
    # that kills the whole host process (e.g. the bot importing this
    # package). Raise a descriptive ImportError and let the caller decide.
    raise ImportError(
        "google-generativeai library not found. "
        "Please install it using: pip install google-genai"
    ) from _err

try:
    from PIL import Image
except ImportError as _err:
    raise ImportError(
        "Pillow library not found. Please install it using: pip install Pillow"
    ) from _err

# --- Configuration ----------------------------------------------------------
# It is recommended to set these as environment variables for security.
# IMGBB_API_KEY: used to upload the final image and get a public URL.
IMGBB_API_KEY = os.getenv("IMGBB_API_KEY")
# GEMINI_API_KEY: API key for accessing the Gemini model.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")

# Configure the Gemini client only when a key is present. generate_image()
# guards on GEMINI_API_KEY before touching `client`, so the name being
# undefined without a key is intentional.
if GEMINI_API_KEY:
    client = genai.Client(api_key=GEMINI_API_KEY)
| 37 |
+
def upload_to_imgbb(image_path: str, file_name: str) -> Optional[str]:
    """
    Upload the image at *image_path* to ImgBB under *file_name*.

    Returns the public URL on success, or None when the API key is missing,
    the upload fails, or the response lacks a URL.
    """
    if not IMGBB_API_KEY:
        print("Warning: IMGBB_API_KEY not set, skipping upload to ImgBB.")
        return None

    # requests is only needed for ImgBB uploads, so the import is local.
    import requests

    print(f"Uploading {file_name} to ImgBB...")
    try:
        with open(image_path, 'rb') as image_file:
            upload_files = {"image": (file_name, image_file.read())}
        response = requests.post(
            "https://api.imgbb.com/1/upload",
            params={"key": IMGBB_API_KEY},
            files=upload_files,
            timeout=30,
        )
        response.raise_for_status()
        public_url = response.json().get("data", {}).get("url")
        if not public_url:
            print(f"Error: ImgBB API response missing 'url'. Response: {response.json()}")
            return None
        print(f"Successfully uploaded to ImgBB: {public_url}")
        return public_url
    except requests.exceptions.RequestException as e:
        print(f"Error: ImgBB upload failed: {e}")
        return None
| 71 |
+
|
| 72 |
+
def _save_image_bytes(image_bytes: bytes, save_path: str) -> bool:
|
| 73 |
+
"""
|
| 74 |
+
Saves raw image bytes to a file.
|
| 75 |
+
|
| 76 |
+
Args:
|
| 77 |
+
image_bytes: The raw bytes of the image data.
|
| 78 |
+
save_path: The local path to save the image.
|
| 79 |
+
|
| 80 |
+
Returns:
|
| 81 |
+
True if saving was successful, False otherwise.
|
| 82 |
+
"""
|
| 83 |
+
print(f"Saving generated image to: {save_path}")
|
| 84 |
+
try:
|
| 85 |
+
with open(save_path, 'wb') as f:
|
| 86 |
+
f.write(image_bytes)
|
| 87 |
+
print("Image successfully saved.")
|
| 88 |
+
return True
|
| 89 |
+
except IOError as e:
|
| 90 |
+
print(f"Error saving image file: {e}")
|
| 91 |
+
return False
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def generate_image(
    prompt_text: str,
    image_path: str,
    download_path: Optional[str] = None
) -> Optional[Dict[str, Any]]:
    """
    Send an image-edit request to the Gemini API via the Python SDK,
    optionally save the resulting image, and upload it to ImgBB.

    Args:
        prompt_text: The instructional text for image modification.
        image_path: The file path to the input image (any common format).
        download_path: If provided, the path to save the generated image.

    Returns:
        A dict with a simplified API response and the ImgBB URL, or None
        on any error (missing key, unreadable image, API failure).
    """
    if not GEMINI_API_KEY:
        print("Error: GEMINI_API_KEY environment variable not set.")
        return None

    try:
        print(f"Processing image: {image_path}")
        img = Image.open(image_path)
    except FileNotFoundError:
        print(f"Error: Image file not found at {image_path}")
        return None
    except Exception as e:
        print(f"Error processing image file. Ensure it's a valid image. Details: {e}")
        return None

    try:
        print("Initializing Gemini model...")
        print("Sending request to Gemini API via Python SDK...")
        # Re-encode the input as JPEG for the API.
        # Fix: JPEG cannot encode alpha/palette modes — Image.save raised
        # for RGBA/P inputs (e.g. transparent PNGs) and the broad except
        # below silently returned None. Normalize to RGB first.
        img_byte_arr = BytesIO()
        img.convert("RGB").save(img_byte_arr, format='JPEG')
        img_bytes = img_byte_arr.getvalue()

        response = client.models.generate_content(
            model='gemini-2.0-flash-preview-image-generation',
            contents=[
                types.Part.from_text(text=prompt_text),
                types.Part.from_bytes(data=img_bytes, mime_type='image/jpeg'),
            ],
            config=types.GenerateContentConfig(
                response_modalities=["TEXT", "IMAGE"]
            )
        )

        # Extract the first inline image payload from the response parts.
        generated_image_bytes = None
        for part in response.candidates[0].content.parts:
            if hasattr(part, "inline_data") and part.inline_data and hasattr(part.inline_data, "data"):
                generated_image_bytes = part.inline_data.data
                break

        # Fix: the original dumped the raw image bytes to stdout
        # (debug leftover). Log a short summary instead.
        if generated_image_bytes is not None:
            print(f"Received {len(generated_image_bytes)} bytes of image data.")
        else:
            print("Warning: Gemini response contained no image data.")

        imgbb_url = None
        # --- Download & Upload Logic ---
        if download_path and generated_image_bytes:
            if _save_image_bytes(generated_image_bytes, download_path):
                # Saved locally; mirror it to ImgBB for a public URL.
                file_name = os.path.basename(download_path)
                imgbb_url = upload_to_imgbb(download_path, file_name)

        # Shape the result like the original script's output.
        final_result = {
            "api_response": {
                "candidates": [{
                    "finish_reason": response.candidates[0].finish_reason.name
                }]
            },
            "imgbb_url": imgbb_url
        }
        return final_result

    except Exception as e:
        print(f"Gemini API request failed: {e}")
        return None
|
requirements.txt
CHANGED
|
@@ -5,4 +5,5 @@ pillow
|
|
| 5 |
requests
|
| 6 |
supabase
|
| 7 |
pydantic
|
| 8 |
-
python-telegram-bot
|
|
|
|
|
|
| 5 |
requests
|
| 6 |
supabase
|
| 7 |
pydantic
|
| 8 |
+
python-telegram-bot
|
| 9 |
+
google-genai
|
telebot.py
DELETED
|
@@ -1,496 +0,0 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import threading
|
| 3 |
-
import requests
|
| 4 |
-
import logging
|
| 5 |
-
import queue
|
| 6 |
-
import json
|
| 7 |
-
import asyncio
|
| 8 |
-
from typing import List, Optional, Literal
|
| 9 |
-
from collections import defaultdict, deque
|
| 10 |
-
from concurrent.futures import ThreadPoolExecutor
|
| 11 |
-
|
| 12 |
-
from telegram import Update, Message, Bot
|
| 13 |
-
from telegram.ext import (
|
| 14 |
-
ApplicationBuilder,
|
| 15 |
-
ContextTypes,
|
| 16 |
-
CommandHandler,
|
| 17 |
-
MessageHandler,
|
| 18 |
-
filters,
|
| 19 |
-
)
|
| 20 |
-
from pydantic import BaseModel, Field, ValidationError
|
| 21 |
-
|
| 22 |
-
from FLUX import generate_image
|
| 23 |
-
from VoiceReply import generate_voice_reply
|
| 24 |
-
from polLLM import generate_llm, LLMBadRequestError
|
| 25 |
-
|
| 26 |
-
# --- Logging Setup ---------------------------------------------------------
|
| 27 |
-
|
| 28 |
-
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
|
| 29 |
-
logger = logging.getLogger("eve_bot")
|
| 30 |
-
logger.setLevel(LOG_LEVEL)
|
| 31 |
-
|
| 32 |
-
handler = logging.StreamHandler()
|
| 33 |
-
formatter = logging.Formatter(
|
| 34 |
-
"%(asctime)s [%(levelname)s] [%(chat_id)s/%(user_id)s] %(message)s"
|
| 35 |
-
)
|
| 36 |
-
handler.setFormatter(formatter)
|
| 37 |
-
|
| 38 |
-
class ContextFilter(logging.Filter):
|
| 39 |
-
def filter(self, record):
|
| 40 |
-
record.chat_id = getattr(record, "chat_id", "-")
|
| 41 |
-
record.user_id = getattr(record, "user_id", "-")
|
| 42 |
-
return True
|
| 43 |
-
|
| 44 |
-
handler.addFilter(ContextFilter())
|
| 45 |
-
logger.handlers = [handler]
|
| 46 |
-
|
| 47 |
-
# Thread‐local to carry context through helpers
|
| 48 |
-
_thread_ctx = threading.local()
|
| 49 |
-
def set_thread_context(chat_id, user_id, message_id):
|
| 50 |
-
_thread_ctx.chat_id = chat_id
|
| 51 |
-
_thread_ctx.user_id = user_id
|
| 52 |
-
_thread_ctx.message_id = message_id
|
| 53 |
-
|
| 54 |
-
def get_thread_context():
|
| 55 |
-
return (
|
| 56 |
-
getattr(_thread_ctx, "chat_id", None),
|
| 57 |
-
getattr(_thread_ctx, "user_id", None),
|
| 58 |
-
getattr(_thread_ctx, "message_id", None),
|
| 59 |
-
)
|
| 60 |
-
|
| 61 |
-
# --- Conversation History -------------------------------------------------
|
| 62 |
-
|
| 63 |
-
history = defaultdict(lambda: deque(maxlen=20))
|
| 64 |
-
|
| 65 |
-
def record_user_message(chat_id, user_id, message):
|
| 66 |
-
history[(chat_id, user_id)].append(f"User: {message}")
|
| 67 |
-
|
| 68 |
-
def record_bot_message(chat_id, user_id, message):
|
| 69 |
-
history[(chat_id, user_id)].append(f"Assistant: {message}")
|
| 70 |
-
|
| 71 |
-
def get_history_text(chat_id, user_id):
|
| 72 |
-
return "\n".join(history[(chat_id, user_id)])
|
| 73 |
-
|
| 74 |
-
def clear_history(chat_id, user_id):
|
| 75 |
-
history[(chat_id, user_id)].clear()
|
| 76 |
-
|
| 77 |
-
# --- Config ---------------------------------------------------------------
|
| 78 |
-
|
| 79 |
-
class BotConfig:
|
| 80 |
-
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")
|
| 81 |
-
IMAGE_DIR = "/tmp/images"
|
| 82 |
-
AUDIO_DIR = "/tmp/audio"
|
| 83 |
-
DEFAULT_IMAGE_COUNT = 4
|
| 84 |
-
|
| 85 |
-
@classmethod
|
| 86 |
-
def validate(cls):
|
| 87 |
-
if not cls.TELEGRAM_TOKEN:
|
| 88 |
-
raise ValueError("Missing TELEGRAM_TOKEN")
|
| 89 |
-
|
| 90 |
-
BotConfig.validate()
|
| 91 |
-
|
| 92 |
-
# --- Threading & Queues ---------------------------------------------------
|
| 93 |
-
|
| 94 |
-
task_queue = queue.Queue()
|
| 95 |
-
polls = {}
|
| 96 |
-
|
| 97 |
-
def worker():
|
| 98 |
-
while True:
|
| 99 |
-
task = task_queue.get()
|
| 100 |
-
try:
|
| 101 |
-
if task["type"] == "image":
|
| 102 |
-
_fn_generate_images(**task)
|
| 103 |
-
except Exception as e:
|
| 104 |
-
logger.error(f"Worker error {task}: {e}")
|
| 105 |
-
finally:
|
| 106 |
-
task_queue.task_done()
|
| 107 |
-
|
| 108 |
-
for _ in range(4):
|
| 109 |
-
threading.Thread(target=worker, daemon=True).start()
|
| 110 |
-
|
| 111 |
-
# --- Core Handlers --------------------------------------------------------
|
| 112 |
-
|
| 113 |
-
async def _fn_send_text(mid: int, cid: int, text: str, context: ContextTypes.DEFAULT_TYPE):
|
| 114 |
-
chat_id, user_id, _ = get_thread_context()
|
| 115 |
-
msg: Message = await context.bot.send_message(
|
| 116 |
-
chat_id=cid,
|
| 117 |
-
text=text,
|
| 118 |
-
reply_to_message_id=mid
|
| 119 |
-
)
|
| 120 |
-
record_bot_message(chat_id, user_id, text)
|
| 121 |
-
# enqueue audio reply in async loop
|
| 122 |
-
context.application.create_task(_fn_voice_reply_async(
|
| 123 |
-
msg.message_id, cid, text, context
|
| 124 |
-
))
|
| 125 |
-
|
| 126 |
-
async def _fn_send_text_wrapper(mid, cid, message, context):
|
| 127 |
-
await _fn_send_text(mid, cid, message, context)
|
| 128 |
-
|
| 129 |
-
async def _fn_voice_reply_async(mid: int, cid: int, prompt: str, context: ContextTypes.DEFAULT_TYPE):
|
| 130 |
-
proc = (
|
| 131 |
-
f"Just say this exactly as written in a friendly, playful, "
|
| 132 |
-
f"happy and helpful but a little bit clumsy-cute way: {prompt}"
|
| 133 |
-
)
|
| 134 |
-
res = generate_voice_reply(proc, model="openai-audio", voice="coral", audio_dir=BotConfig.AUDIO_DIR)
|
| 135 |
-
if res and res[0]:
|
| 136 |
-
path, _ = res
|
| 137 |
-
with open(path, "rb") as f:
|
| 138 |
-
await context.bot.send_audio(chat_id=cid, audio=f, reply_to_message_id=mid)
|
| 139 |
-
os.remove(path)
|
| 140 |
-
else:
|
| 141 |
-
await _fn_send_text(mid, cid, prompt, context)
|
| 142 |
-
|
| 143 |
-
async def _fn_summarize(mid, cid, text, context):
|
| 144 |
-
summary = generate_llm(f"Summarize:\n\n{text}")
|
| 145 |
-
await _fn_send_text(mid, cid, summary, context)
|
| 146 |
-
|
| 147 |
-
async def _fn_translate(mid, cid, lang, text, context):
|
| 148 |
-
resp = generate_llm(f"Translate to {lang}:\n\n{text}")
|
| 149 |
-
await _fn_send_text(mid, cid, resp, context)
|
| 150 |
-
|
| 151 |
-
async def _fn_joke(mid, cid, context):
|
| 152 |
-
try:
|
| 153 |
-
j = requests.get("https://official-joke-api.appspot.com/random_joke", timeout=5).json()
|
| 154 |
-
joke = f"{j['setup']}\n\n{j['punchline']}"
|
| 155 |
-
except:
|
| 156 |
-
joke = generate_llm("Tell me a short joke.")
|
| 157 |
-
await _fn_send_text(mid, cid, joke, context)
|
| 158 |
-
|
| 159 |
-
async def _fn_weather(mid, cid, loc, context):
|
| 160 |
-
raw = requests.get(f"http://sl.wttr.in/{loc}?format=4", timeout=5).text
|
| 161 |
-
report = generate_llm(f"Give a weather report in °C:\n\n{raw}")
|
| 162 |
-
await _fn_send_text(mid, cid, report, context)
|
| 163 |
-
|
| 164 |
-
async def _fn_inspire(mid, cid, context):
|
| 165 |
-
quote = generate_llm("Give me a unique, random short inspirational quote.")
|
| 166 |
-
await _fn_send_text(mid, cid, f"✨ {quote}", context)
|
| 167 |
-
|
| 168 |
-
async def _fn_meme(mid, cid, txt, context):
|
| 169 |
-
await context.bot.send_message(chat_id=cid, text="🎨 Generating meme…", reply_to_message_id=mid)
|
| 170 |
-
task_queue.put({"type":"image","message_id":mid,"chat_id":cid,"prompt":f"meme: {txt}"})
|
| 171 |
-
|
| 172 |
-
async def _fn_poll_create(mid, cid, question, options, context):
|
| 173 |
-
votes = {i+1:0 for i in range(len(options))}
|
| 174 |
-
polls[cid] = {"question": question, "options": options, "votes": votes, "voters": {}}
|
| 175 |
-
text = f"📊 *Poll:* {question}\n" + "\n".join(f"{i+1}. {o}" for i,o in enumerate(options))
|
| 176 |
-
await context.bot.send_message(chat_id=cid, text=text, reply_to_message_id=mid, parse_mode="Markdown")
|
| 177 |
-
record_bot_message(cid, get_thread_context()[1], text)
|
| 178 |
-
|
| 179 |
-
async def _fn_poll_vote(mid, cid, voter, choice, context):
|
| 180 |
-
poll = polls.get(cid)
|
| 181 |
-
if not poll or choice<1 or choice>len(poll["options"]): return
|
| 182 |
-
prev = poll["voters"].get(voter)
|
| 183 |
-
if prev: poll["votes"][prev] -= 1
|
| 184 |
-
poll["votes"][choice] += 1
|
| 185 |
-
poll["voters"][voter] = choice
|
| 186 |
-
await _fn_send_text(mid, cid, f"✅ Voted for {poll['options'][choice-1]}", context)
|
| 187 |
-
|
| 188 |
-
async def _fn_poll_results(mid, cid, context):
|
| 189 |
-
poll = polls.get(cid)
|
| 190 |
-
if not poll:
|
| 191 |
-
await _fn_send_text(mid, cid, "No active poll.", context)
|
| 192 |
-
return
|
| 193 |
-
txt = f"📊 *Results:* {poll['question']}\n" + "\n".join(
|
| 194 |
-
f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
|
| 195 |
-
)
|
| 196 |
-
await _fn_send_text(mid, cid, txt, context)
|
| 197 |
-
|
| 198 |
-
async def _fn_poll_end(mid, cid, context):
|
| 199 |
-
poll = polls.pop(cid, None)
|
| 200 |
-
if not poll:
|
| 201 |
-
await _fn_send_text(mid, cid, "No active poll.", context)
|
| 202 |
-
return
|
| 203 |
-
txt = f"📊 *Final Results:* {poll['question']}\n" + "\n".join(
|
| 204 |
-
f"{i}. {o}: {poll['votes'][i]}" for i,o in enumerate(poll["options"],1)
|
| 205 |
-
)
|
| 206 |
-
await _fn_send_text(mid, cid, txt, context)
|
| 207 |
-
|
| 208 |
-
def _fn_generate_images(message_id, chat_id, prompt, count=1, width=None, height=None, **_):
|
| 209 |
-
"""
|
| 210 |
-
Runs in a background thread. Spins up its own asyncio loop
|
| 211 |
-
so we can `await` the bot’s send_message / send_photo coroutines.
|
| 212 |
-
"""
|
| 213 |
-
b = Bot(BotConfig.TELEGRAM_TOKEN)
|
| 214 |
-
loop = asyncio.new_event_loop()
|
| 215 |
-
asyncio.set_event_loop(loop)
|
| 216 |
-
try:
|
| 217 |
-
loop.run_until_complete(
|
| 218 |
-
b.send_message(
|
| 219 |
-
chat_id=chat_id,
|
| 220 |
-
text=f"✨ Generating {count} image(s)…",
|
| 221 |
-
reply_to_message_id=message_id
|
| 222 |
-
)
|
| 223 |
-
)
|
| 224 |
-
for i in range(1, count+1):
|
| 225 |
-
try:
|
| 226 |
-
img, path, ret_p, url = generate_image(
|
| 227 |
-
prompt, str(message_id), str(message_id),
|
| 228 |
-
BotConfig.IMAGE_DIR, width=width, height=height
|
| 229 |
-
)
|
| 230 |
-
caption = f"✨ Image {i}/{count}: {url}\n\n{ret_p}"
|
| 231 |
-
with open(path, "rb") as f:
|
| 232 |
-
loop.run_until_complete(
|
| 233 |
-
b.send_photo(
|
| 234 |
-
chat_id=chat_id,
|
| 235 |
-
photo=f,
|
| 236 |
-
caption=caption,
|
| 237 |
-
reply_to_message_id=message_id
|
| 238 |
-
)
|
| 239 |
-
)
|
| 240 |
-
os.remove(path)
|
| 241 |
-
except Exception as e:
|
| 242 |
-
logger.warning(f"Img {i}/{count} failed: {e}")
|
| 243 |
-
loop.run_until_complete(
|
| 244 |
-
b.send_message(
|
| 245 |
-
chat_id=chat_id,
|
| 246 |
-
text=f"😢 Failed to generate image {i}/{count}.",
|
| 247 |
-
reply_to_message_id=message_id
|
| 248 |
-
)
|
| 249 |
-
)
|
| 250 |
-
finally:
|
| 251 |
-
loop.close()
|
| 252 |
-
|
| 253 |
-
# --- Pydantic Models & Intent Routing ------------------------------------
|
| 254 |
-
|
| 255 |
-
class BaseIntent(BaseModel):
|
| 256 |
-
action: str
|
| 257 |
-
|
| 258 |
-
class SummarizeIntent(BaseIntent):
|
| 259 |
-
action: Literal["summarize"]
|
| 260 |
-
text: str
|
| 261 |
-
|
| 262 |
-
class TranslateIntent(BaseIntent):
|
| 263 |
-
action: Literal["translate"]
|
| 264 |
-
lang: str
|
| 265 |
-
text: str
|
| 266 |
-
|
| 267 |
-
class JokeIntent(BaseIntent):
|
| 268 |
-
action: Literal["joke"]
|
| 269 |
-
|
| 270 |
-
class WeatherIntent(BaseIntent):
|
| 271 |
-
action: Literal["weather"]
|
| 272 |
-
location: str
|
| 273 |
-
|
| 274 |
-
class InspireIntent(BaseIntent):
|
| 275 |
-
action: Literal["inspire"]
|
| 276 |
-
|
| 277 |
-
class MemeIntent(BaseIntent):
|
| 278 |
-
action: Literal["meme"]
|
| 279 |
-
text: str
|
| 280 |
-
|
| 281 |
-
class PollCreateIntent(BaseIntent):
|
| 282 |
-
action: Literal["poll_create"]
|
| 283 |
-
question: str
|
| 284 |
-
options: List[str]
|
| 285 |
-
|
| 286 |
-
class PollVoteIntent(BaseIntent):
|
| 287 |
-
action: Literal["poll_vote"]
|
| 288 |
-
voter: str
|
| 289 |
-
choice: int
|
| 290 |
-
|
| 291 |
-
class PollResultsIntent(BaseIntent):
|
| 292 |
-
action: Literal["poll_results"]
|
| 293 |
-
|
| 294 |
-
class PollEndIntent(BaseIntent):
|
| 295 |
-
action: Literal["poll_end"]
|
| 296 |
-
|
| 297 |
-
class GenerateImageIntent(BaseModel):
|
| 298 |
-
action: Literal["generate_image"]
|
| 299 |
-
prompt: str
|
| 300 |
-
count: int = Field(default=1, ge=1)
|
| 301 |
-
width: Optional[int]
|
| 302 |
-
height: Optional[int]
|
| 303 |
-
|
| 304 |
-
class SendTextIntent(BaseIntent):
|
| 305 |
-
action: Literal["send_text"]
|
| 306 |
-
message: str
|
| 307 |
-
|
| 308 |
-
INTENT_MODELS = [
|
| 309 |
-
SummarizeIntent, TranslateIntent, JokeIntent, WeatherIntent,
|
| 310 |
-
InspireIntent, MemeIntent, PollCreateIntent, PollVoteIntent,
|
| 311 |
-
PollResultsIntent, PollEndIntent, GenerateImageIntent, SendTextIntent
|
| 312 |
-
]
|
| 313 |
-
|
| 314 |
-
async def _fn_enqueue_image(mid, cid, prompt, count, width, height, context):
|
| 315 |
-
task_queue.put({
|
| 316 |
-
"type":"image",
|
| 317 |
-
"message_id": mid,
|
| 318 |
-
"chat_id": cid,
|
| 319 |
-
"prompt": prompt,
|
| 320 |
-
"count": count,
|
| 321 |
-
"width": width,
|
| 322 |
-
"height": height
|
| 323 |
-
})
|
| 324 |
-
|
| 325 |
-
ACTION_HANDLERS = {
|
| 326 |
-
"summarize": _fn_summarize,
|
| 327 |
-
"translate": _fn_translate,
|
| 328 |
-
"joke": _fn_joke,
|
| 329 |
-
"weather": _fn_weather,
|
| 330 |
-
"inspire": _fn_inspire,
|
| 331 |
-
"meme": _fn_meme,
|
| 332 |
-
"poll_create": _fn_poll_create,
|
| 333 |
-
"poll_vote": _fn_poll_vote,
|
| 334 |
-
"poll_results": _fn_poll_results,
|
| 335 |
-
"poll_end": _fn_poll_end,
|
| 336 |
-
"generate_image": _fn_enqueue_image,
|
| 337 |
-
"send_text": _fn_send_text_wrapper,
|
| 338 |
-
}
|
| 339 |
-
|
| 340 |
-
def route_intent(user_input: str, chat_id: str, sender: str):
|
| 341 |
-
history_text = get_history_text(chat_id, sender)
|
| 342 |
-
sys_prompt = (
|
| 343 |
-
"You never perform work yourself—you only invoke one of the available functions. "
|
| 344 |
-
"When the user asks for something that matches a function signature, you must return exactly one JSON object matching that function’s parameters—and nothing else. "
|
| 345 |
-
"Do not wrap it in markdown, do not add extra text, and do not show the JSON to the user. "
|
| 346 |
-
"If the user’s request does not match any function, reply in plain text, and never mention JSON or internal logic.\n\n"
|
| 347 |
-
"- summarize(text)\n"
|
| 348 |
-
"- translate(lang, text)\n"
|
| 349 |
-
"- joke()\n"
|
| 350 |
-
"- weather(location)\n"
|
| 351 |
-
"- inspire()\n"
|
| 352 |
-
"- meme(text)\n"
|
| 353 |
-
"- poll_create(question, options)\n"
|
| 354 |
-
"- poll_vote(voter, choice)\n"
|
| 355 |
-
"- poll_results()\n"
|
| 356 |
-
"- poll_end()\n"
|
| 357 |
-
"- generate_image(prompt, count, width, height)\n"
|
| 358 |
-
"- send_text(message)\n\n"
|
| 359 |
-
"Return only raw JSON matching one of these shapes. For example:\n"
|
| 360 |
-
" {\"action\":\"generate_image\",\"prompt\":\"a red fox\",\"count\":3,\"width\":512,\"height\":512}\n"
|
| 361 |
-
"Otherwise, use send_text to reply with plain chat and you should only return one json for the current message not for previous conversations.\n"
|
| 362 |
-
f"Conversation so far:\n{history_text}\n\n current message: User: {user_input}"
|
| 363 |
-
)
|
| 364 |
-
try:
|
| 365 |
-
raw = generate_llm(sys_prompt)
|
| 366 |
-
except LLMBadRequestError:
|
| 367 |
-
clear_history(chat_id, sender)
|
| 368 |
-
return SendTextIntent(action="send_text", message="Oops, let’s start fresh!")
|
| 369 |
-
try:
|
| 370 |
-
parsed = json.loads(raw)
|
| 371 |
-
except json.JSONDecodeError:
|
| 372 |
-
return SendTextIntent(action="send_text", message=raw)
|
| 373 |
-
|
| 374 |
-
for M in INTENT_MODELS:
|
| 375 |
-
try:
|
| 376 |
-
return M.model_validate(parsed)
|
| 377 |
-
except ValidationError:
|
| 378 |
-
continue
|
| 379 |
-
|
| 380 |
-
action = parsed.get("action")
|
| 381 |
-
if action in ACTION_HANDLERS:
|
| 382 |
-
data = parsed
|
| 383 |
-
kwargs = {}
|
| 384 |
-
if action == "generate_image":
|
| 385 |
-
kwargs = {
|
| 386 |
-
"prompt": data.get("prompt",""),
|
| 387 |
-
"count": int(data.get("count", BotConfig.DEFAULT_IMAGE_COUNT)),
|
| 388 |
-
"width": data.get("width"),
|
| 389 |
-
"height": data.get("height"),
|
| 390 |
-
}
|
| 391 |
-
elif action == "send_text":
|
| 392 |
-
kwargs = {"message": data.get("message","")}
|
| 393 |
-
elif action == "translate":
|
| 394 |
-
kwargs = {"lang": data.get("lang",""), "text": data.get("text","")}
|
| 395 |
-
elif action == "summarize":
|
| 396 |
-
kwargs = {"text": data.get("text","")}
|
| 397 |
-
elif action == "weather":
|
| 398 |
-
kwargs = {"location": data.get("location","")}
|
| 399 |
-
elif action == "meme":
|
| 400 |
-
kwargs = {"text": data.get("text","")}
|
| 401 |
-
elif action == "poll_create":
|
| 402 |
-
kwargs = {"question": data.get("question",""), "options": data.get("options",[])}
|
| 403 |
-
elif action == "poll_vote":
|
| 404 |
-
kwargs = {"voter": sender, "choice": int(data.get("choice",0))}
|
| 405 |
-
try:
|
| 406 |
-
model = next(m for m in INTENT_MODELS if getattr(m, "__fields__", {}).get("action").default == action)
|
| 407 |
-
return model.model_validate({"action":action, **kwargs})
|
| 408 |
-
except Exception:
|
| 409 |
-
return SendTextIntent(action="send_text", message=raw)
|
| 410 |
-
|
| 411 |
-
return SendTextIntent(action="send_text", message=raw)
|
| 412 |
-
|
| 413 |
-
# --- Telegram Handlers ----------------------------------------------------
|
| 414 |
-
|
| 415 |
-
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
| 416 |
-
await update.message.reply_text("🌟 Eve is online! Type /help to see commands.")
|
| 417 |
-
|
| 418 |
-
async def help_cmd(update: Update, context: ContextTypes.DEFAULT_TYPE):
|
| 419 |
-
await update.message.reply_markdown(
|
| 420 |
-
"🤖 *Eve* commands:\n"
|
| 421 |
-
"• /help\n"
|
| 422 |
-
"• /summarize <text>\n"
|
| 423 |
-
"• /translate <lang>|<text>\n"
|
| 424 |
-
"• /joke\n"
|
| 425 |
-
"• /weather <loc>\n"
|
| 426 |
-
"• /inspire\n"
|
| 427 |
-
"• /meme <text>\n"
|
| 428 |
-
"• /poll <Q>|opt1|opt2 …\n"
|
| 429 |
-
"• /results\n"
|
| 430 |
-
"• /endpoll\n"
|
| 431 |
-
"• /gen <prompt>|<count>|<width>|<height>\n"
|
| 432 |
-
"Otherwise just chat with me."
|
| 433 |
-
)
|
| 434 |
-
|
| 435 |
-
async def message_router(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Dispatch an incoming text message.

    Slash commands are handled inline; a bare digit while a poll is open
    counts as a vote; anything else goes through the LLM intent
    classifier and is dispatched via ``ACTION_HANDLERS``.

    Fixes over the previous version:
    - ``/translate`` without a ``|`` separator no longer crashes with an
      unpacking ``ValueError``; it replies with a usage hint instead.
    - An intent action missing from ``ACTION_HANDLERS`` no longer raises
      ``TypeError`` (awaiting ``None``); it is logged and ignored.
    """
    msg: Message = update.message
    chat_id = msg.chat.id
    user_id = msg.from_user.id
    mid = msg.message_id
    text = msg.text or ""

    set_thread_context(chat_id, user_id, mid)
    record_user_message(chat_id, user_id, text)

    low = text.lower().strip()
    if low == "/help":
        return await help_cmd(update, context)
    if low.startswith("/summarize "):
        return await _fn_summarize(mid, chat_id, text[11:].strip(), context)
    if low.startswith("/translate "):
        # str.partition never raises, unlike tuple-unpacking split(),
        # so a missing "|" yields a usage hint instead of a crash.
        lang, sep, txt = text[11:].partition("|")
        if not sep:
            return await msg.reply_text("Usage: /translate <lang>|<text>")
        return await _fn_translate(mid, chat_id, lang.strip(), txt.strip(), context)
    if low == "/joke":
        return await _fn_joke(mid, chat_id, context)
    if low.startswith("/weather "):
        # Location is URL-encoded the simple way: spaces become '+'.
        return await _fn_weather(mid, chat_id, text[9:].strip().replace(" ", "+"), context)
    if low == "/inspire":
        return await _fn_inspire(mid, chat_id, context)
    if low.startswith("/meme "):
        return await _fn_meme(mid, chat_id, text[6:].strip(), context)
    if low.startswith("/poll "):
        # "/poll Q|opt1|opt2" -> question plus option list.
        parts = [p.strip() for p in text[6:].split("|")]
        return await _fn_poll_create(mid, chat_id, parts[0], parts[1:], context)
    if chat_id in polls and low.isdigit():
        # A bare number while this chat has an open poll is a vote.
        return await _fn_poll_vote(mid, chat_id, str(user_id), int(low), context)
    if low == "/results":
        return await _fn_poll_results(mid, chat_id, context)
    if low == "/endpoll":
        return await _fn_poll_end(mid, chat_id, context)
    if low.startswith("/gen"):
        # "/gen prompt|count|width|height" — count/width/height optional;
        # non-numeric values silently fall back to defaults.
        parts = text[4:].split("|")
        prompt = parts[0].strip()
        count = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else BotConfig.DEFAULT_IMAGE_COUNT
        width = int(parts[2]) if len(parts) > 2 and parts[2].isdigit() else None
        height = int(parts[3]) if len(parts) > 3 and parts[3].isdigit() else None
        task_queue.put({
            "type": "image", "message_id": mid, "chat_id": chat_id,
            "prompt": prompt, "count": count, "width": width, "height": height,
        })
        return

    # Free-form chat: classify intent, then dispatch.  Guard against an
    # action with no registered handler instead of awaiting None.
    intent = route_intent(text, str(chat_id), str(user_id))
    handler = ACTION_HANDLERS.get(intent.action)
    if handler is None:
        logger.warning("No handler registered for intent action %r", intent.action)
        return
    kwargs = intent.model_dump(exclude={"action"})
    await handler(mid, chat_id, **kwargs, context=context)
|
| 486 |
-
|
| 487 |
-
def main():
    """Build the Telegram application, register handlers, start polling."""
    application = ApplicationBuilder().token(BotConfig.TELEGRAM_TOKEN).build()
    handlers = (
        CommandHandler("start", start),
        CommandHandler("help", help_cmd),
        MessageHandler(filters.TEXT & ~filters.COMMAND, message_router),
    )
    for handler in handlers:
        application.add_handler(handler)
    logger.info("Starting Telegram bot…")
    application.run_polling()
|
| 494 |
-
|
| 495 |
-
# Script entry point: run the bot only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|