Compare commits

..

11 Commits

Author SHA1 Message Date
tami-p40
8fd4b07559 steps default 2024-06-09 11:33:02 +03:00
tami-p40
9f8bff3540 cfg for XL 2024-06-09 10:39:34 +03:00
tami-p40
ec900759a1 get model 2024-06-03 14:14:25 +03:00
tami-p40
49c9f6337a size limit 2024-06-03 13:35:14 +03:00
tami-p40
f62d6a6dbc jaguarnut 2024-05-29 09:28:23 +03:00
tami-p40
5baffd360b info 2024-05-26 09:05:27 +03:00
tami-p40
814a779b47 info 2024-05-26 09:00:32 +03:00
tami-p40
ca1c34b91b negative emmbedings 2024-05-26 08:49:27 +03:00
tami-p40
de2badd5a0 xcfg 2024-05-20 10:29:34 +03:00
tami-p40
3fa661a54c qa 2024-05-18 20:41:30 +03:00
tami-p40
c8e825b247 AIrefactor 2024-05-18 14:06:29 +03:00
6 changed files with 313 additions and 315 deletions

1
.gitignore vendored
View File

@@ -6,3 +6,4 @@ venv/
*.session-journal *.session-journal
logs/stable_diff_telegram_bot.log logs/stable_diff_telegram_bot.log
*.session *.session
images/

View File

@@ -1,41 +0,0 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"this came from upstream, but it is not yet fixed"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15+e163309.d20230103-cp38-cp38-linux_x86_64.whl\n",
"\n",
"!git clone https://github.com/camenduru/stable-diffusion-webui\n",
"!git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /content/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui\n",
"!git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /content/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser\n",
"!git clone https://github.com/camenduru/stable-diffusion-webui-huggingface /content/stable-diffusion-webui/extensions/stable-diffusion-webui-huggingface\n",
"!git clone https://github.com/Vetchems/sd-civitai-browser /content/stable-diffusion-webui/extensions/sd-civitai-browser\n",
"%cd /content/stable-diffusion-webui\n",
"\n",
"!wget https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /content/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt\n",
"!wget https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /content/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt\n",
"\n",
"!python launch.py --share --xformers --api\n"
]
}
],
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -47,6 +47,11 @@ Set that low (like 0.2) if you just want to slightly change things. defaults to
basically anything the `/controlnet/img2img` API payload supports basically anything the `/controlnet/img2img` API payload supports
### general
`X/Y/Z script` [link](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#xyz-plot), one powerful thing
for prompts we use the Search Replace option (a.k.a. `prompt s/r`) [explained](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#prompt-sr)
## Setup ## Setup
Install requirements using venv Install requirements using venv

View File

@@ -1,163 +0,0 @@
from datetime import datetime
import urllib.request
import base64
import json
import time
import os
url="pop-os.local"
webui_server_url = f'http://{url}:7860'
out_dir = 'api_out'
out_dir_t2i = os.path.join(out_dir, 'txt2img')
out_dir_i2i = os.path.join(out_dir, 'img2img')
os.makedirs(out_dir_t2i, exist_ok=True)
os.makedirs(out_dir_i2i, exist_ok=True)
def timestamp():
    """Return the current local time formatted as ``YYYYMMDD-HHMMSS``."""
    return datetime.now().strftime("%Y%m%d-%H%M%S")
def encode_file_to_base64(path):
    """Read the binary file at *path* and return it base64-encoded as text."""
    with open(path, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode('utf-8')
def decode_and_save_base64(base64_str, save_path):
    """Decode *base64_str* and write the resulting raw bytes to *save_path*."""
    raw = base64.b64decode(base64_str)
    with open(save_path, "wb") as handle:
        handle.write(raw)
def call_api(api_endpoint, **payload):
    """POST *payload* as JSON to the given WebUI endpoint and return the parsed reply."""
    body = json.dumps(payload).encode('utf-8')
    request = urllib.request.Request(
        f'{webui_server_url}/{api_endpoint}',
        data=body,
        headers={'Content-Type': 'application/json'},
    )
    raw_reply = urllib.request.urlopen(request).read()
    return json.loads(raw_reply.decode('utf-8'))
def call_txt2img_api(**payload):
    """Run a txt2img job with *payload* and save every returned image as PNG."""
    reply = call_api('sdapi/v1/txt2img', **payload)
    for index, image in enumerate(reply.get('images')):
        filename = f'txt2img-{timestamp()}-{index}.png'
        decode_and_save_base64(image, os.path.join(out_dir_t2i, filename))
def call_img2img_api(**payload):
    """Run an img2img job with *payload* and save every returned image as PNG."""
    reply = call_api('sdapi/v1/img2img', **payload)
    for index, image in enumerate(reply.get('images')):
        filename = f'img2img-{timestamp()}-{index}.png'
        decode_and_save_base64(image, os.path.join(out_dir_i2i, filename))
# Example driver: one txt2img call followed by one img2img call against a
# locally running Stable Diffusion WebUI instance.  The commented-out keys
# show optional payload sections (x/y/z plot script args, ControlNet,
# Refiner, hires fix) that the API also accepts.
if __name__ == '__main__':
    # txt2img payload: plain generation settings.
    payload = {
        "prompt": "masterpiece, (best quality:1.1), 1girl <lora:lora_model:1>",  # extra networks also in prompts
        "negative_prompt": "",
        "seed": 1,
        "steps": 20,
        "width": 512,
        "height": 512,
        "cfg_scale": 7,
        "sampler_name": "DPM++ 2M",
        "n_iter": 1,
        "batch_size": 1,

        # example args for x/y/z plot
        # "script_name": "x/y/z plot",
        # "script_args": [
        #     1,
        #     "10,20",
        #     [],
        #     0,
        #     "",
        #     [],
        #     0,
        #     "",
        #     [],
        #     True,
        #     True,
        #     False,
        #     False,
        #     0,
        #     False
        # ],

        # example args for Refiner and ControlNet
        # "alwayson_scripts": {
        #     "ControlNet": {
        #         "args": [
        #             {
        #                 "batch_images": "",
        #                 "control_mode": "Balanced",
        #                 "enabled": True,
        #                 "guidance_end": 1,
        #                 "guidance_start": 0,
        #                 "image": {
        #                     "image": encode_file_to_base64(r"B:\path\to\control\img.png"),
        #                     "mask": None  # base64, None when not need
        #                 },
        #                 "input_mode": "simple",
        #                 "is_ui": True,
        #                 "loopback": False,
        #                 "low_vram": False,
        #                 "model": "control_v11p_sd15_canny [d14c016b]",
        #                 "module": "canny",
        #                 "output_dir": "",
        #                 "pixel_perfect": False,
        #                 "processor_res": 512,
        #                 "resize_mode": "Crop and Resize",
        #                 "threshold_a": 100,
        #                 "threshold_b": 200,
        #                 "weight": 1
        #             }
        #         ]
        #     },
        #     "Refiner": {
        #         "args": [
        #             True,
        #             "sd_xl_refiner_1.0",
        #             0.5
        #         ]
        #     }
        # },
        # "enable_hr": True,
        # "hr_upscaler": "R-ESRGAN 4x+ Anime6B",
        # "hr_scale": 2,
        # "denoising_strength": 0.5,
        # "styles": ['style 1', 'style 2'],
        # "override_settings": {
        #     'sd_model_checkpoint': "sd_xl_base_1.0",  # this can use to switch sd model
        # },
    }
    call_txt2img_api(**payload)

    # img2img: source images are base64-encoded before being sent.
    init_images = [
        encode_file_to_base64(r"../stable-diffusion-webui/output/img2img-images/2024-05-15/00012-357584826.png"),
        # encode_file_to_base64(r"B:\path\to\img_2.png"),
        # "https://image.can/also/be/a/http/url.png",
    ]

    batch_size = 2
    payload = {
        "prompt": "1girl, blue hair",
        "seed": 1,
        "steps": 20,
        "width": 512,
        "height": 512,
        "denoising_strength": 0.5,
        "n_iter": 1,
        "init_images": init_images,
        "batch_size": batch_size if len(init_images) == 1 else len(init_images),
        # "mask": encode_file_to_base64(r"B:\path\to\mask.png")
    }
    # if len(init_images) > 1 then batch_size should be == len(init_images)
    # else if len(init_images) == 1 then batch_size can be any value int >= 1
    call_img2img_api(**payload)

    # there exists a useful extension that allows converting of webui calls to api payloads,
    # particularly useful when you wish to set up arguments of extensions and scripts
    # https://github.com/huchenlei/sd-webui-api-payload-display

View File

@@ -62,29 +62,36 @@ if __name__ == '__main__':
"width": 512, "width": 512,
"height": 512, "height": 512,
"cfg_scale": 7, "cfg_scale": 7,
"sampler_name": "DPM++ 2M", "sampler_name": "DPM++ SDE Karras",
"n_iter": 1, "n_iter": 1,
"batch_size": 1, "batch_size": 1,
# example args for x/y/z plot # example args for x/y/z plot
# "script_name": "x/y/z plot", #steps 4,"20,30"
# "script_args": [ #denoising==22
# 1, # S/R 7,"X,united states,china",
# "10,20", "script_args": [
# [], 4,
# 0, "20,30,40",
# "", [],
# [], 0,
# 0, "",
# "", [],
# [], 0,
# True, "",
# True, [],
# False, True,
# False, False,
# 0, False,
# False False,
# ], False,
False,
False,
0,
False
],
"script_name": "x/y/z plot",
# example args for Refiner and ControlNet # example args for Refiner and ControlNet
# "alwayson_scripts": { # "alwayson_scripts": {

327
main.py
View File

@@ -1,10 +1,10 @@
import json
import requests
import io
import re
import os import os
import re
import io
import uuid import uuid
import base64 import base64
import json
import requests
from datetime import datetime from datetime import datetime
from PIL import Image, PngImagePlugin from PIL import Image, PngImagePlugin
from pyrogram import Client, filters from pyrogram import Client, filters
@@ -13,19 +13,39 @@ from dotenv import load_dotenv
# Load environment variables # Load environment variables
load_dotenv() load_dotenv()
API_ID = os.environ.get("API_ID", None) API_ID = os.environ.get("API_ID")
API_HASH = os.environ.get("API_HASH", None) API_HASH = os.environ.get("API_HASH")
TOKEN = os.environ.get("TOKEN_givemtxt2img", None) TOKEN = os.environ.get("TOKEN_givemtxt2img")
SD_URL = os.environ.get("SD_URL", None) SD_URL = os.environ.get("SD_URL")
# Ensure all required environment variables are loaded
if not all([API_ID, API_HASH, TOKEN, SD_URL]):
raise EnvironmentError("Missing one or more required environment variables: API_ID, API_HASH, TOKEN, SD_URL")
app = Client("stable", api_id=API_ID, api_hash=API_HASH, bot_token=TOKEN) app = Client("stable", api_id=API_ID, api_hash=API_HASH, bot_token=TOKEN)
IMAGE_PATH = 'images' # Do not leave a trailing / IMAGE_PATH = 'images'
# Ensure IMAGE_PATH directory exists # Ensure IMAGE_PATH directory exists
os.makedirs(IMAGE_PATH, exist_ok=True) os.makedirs(IMAGE_PATH, exist_ok=True)
def timestamp():
return datetime.now().strftime("%Y%m%d-%H%M%S") def get_current_model_name():
try:
response = requests.get(f"{SD_URL}/sdapi/v1/options")
response.raise_for_status()
options = response.json()
current_model_name = options.get("sd_model_checkpoint", "Unknown")
return current_model_name
except requests.RequestException as e:
print(f"API call failed: {e}")
return None
# Fetch the current model name at the start
current_model_name = get_current_model_name()
if current_model_name:
print(f"Current model name: {current_model_name}")
else:
print("Failed to fetch the current model name.")
def encode_file_to_base64(path): def encode_file_to_base64(path):
with open(path, 'rb') as file: with open(path, 'rb') as file:
@@ -35,10 +55,11 @@ def decode_and_save_base64(base64_str, save_path):
with open(save_path, "wb") as file: with open(save_path, "wb") as file:
file.write(base64.b64decode(base64_str)) file.write(base64.b64decode(base64_str))
def parse_input(input_string): # Set default payload values
default_payload = { default_payload = {
"prompt": "", "prompt": "",
"negative_prompt": "ugly, bad face, distorted", "seed": -1, # Random seed
"negative_prompt": "extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs",
"enable_hr": False, "enable_hr": False,
"Sampler": "DPM++ SDE Karras", "Sampler": "DPM++ SDE Karras",
"denoising_strength": 0.35, "denoising_strength": 0.35,
@@ -52,119 +73,239 @@ def parse_input(input_string):
"override_settings": {}, "override_settings": {},
"override_settings_restore_afterwards": True, "override_settings_restore_afterwards": True,
} }
# Model-specific embeddings for negative prompts.
# Each entry is a suffix appended to the default negative prompt when the
# corresponding checkpoint is active; an empty string means the model needs
# no extra negative embedding.
model_negative_prompts = {
    "coloringPage_v10": "fake",
    "Anything-Diffusion": "",
    "Deliberate": "",
    "Dreamshaper": "",
    "DreamShaperXL_Lightning": "",
    "realisticVisionV60B1_v51VAE": "realisticvision-negative-embedding",
    "v1-5-pruned-emaonly": "",
    "Juggernaut-XL_v9_RunDiffusionPhoto_v2": "bad eyes, cgi, airbrushed, plastic, watermark"
}
def update_negative_prompt(model_name):
    """Append the model-specific negative-prompt suffix for *model_name*.

    Mutates the module-level ``default_payload`` in place.  Fix: models
    mapped to an empty suffix no longer get a dangling ", " appended to
    the negative prompt.
    """
    if model_name in model_negative_prompts:
        suffix = model_negative_prompts[model_name]
        if suffix:  # skip empty suffixes to avoid appending a bare ", "
            default_payload["negative_prompt"] += f", {suffix}"
        print(f"Updated negative prompt to: {default_payload['negative_prompt']}")
def update_resolution(model_name):
    """Update resolution based on the selected model (SDXL gets portrait 832x1216)."""
    is_xl = model_name == "Juggernaut-XL_v9_RunDiffusionPhoto_v2"
    default_payload["width"] = 832 if is_xl else 512
    default_payload["height"] = 1216 if is_xl else 512
    print(f"Updated resolution to {default_payload['width']}x{default_payload['height']}")
def update_steps(model_name):
    """Update sampling steps based on the selected model.

    Fix: the docstring previously claimed this updated the CFG scale, and
    the log line printed ``cfg_scale`` instead of the steps value just set.
    """
    if model_name == "Juggernaut-XL_v9_RunDiffusionPhoto_v2":
        default_payload["steps"] = 15
    else:
        default_payload["steps"] = 35
    print(f"Updated steps to {default_payload['steps']}")
def update_cfg_scale(model_name):
    """Update CFG scale based on the selected model (SDXL-Lightning-style low CFG)."""
    xl_model = "Juggernaut-XL_v9_RunDiffusionPhoto_v2"
    default_payload["cfg_scale"] = 2.5 if model_name == xl_model else 7
    print(f"Updated CFG scale to {default_payload['cfg_scale']}")
# Update configurations based on the current model name fetched once at
# startup; when the options API call failed we only warn and keep defaults.
if current_model_name:
    update_negative_prompt(current_model_name)
    update_resolution(current_model_name)
    update_cfg_scale(current_model_name)
    update_steps(current_model_name)
else:
    print("Failed to update configurations as the current model name is not available.")
def parse_input(input_string):
"""Parse the input string and create a payload."""
payload = default_payload.copy() payload = default_payload.copy()
prompt = [] prompt = []
include_info = "info:" in input_string
input_string = input_string.replace("info:", "").strip()
matches = re.finditer(r"(\w+):", input_string) matches = re.finditer(r"(\w+):", input_string)
last_index = 0 last_index = 0
script_args = [0, "", [], 0, "", [], 0, "", [], True, False, False, False, False, False, False, 0, False]
script_name = None
slot_mapping = {0: (0, 1), 1: (3, 4), 2: (6, 7)}
slot_index = 0
for match in matches: for match in matches:
key = match.group(1).lower() key = match.group(1).lower()
value_start_index = match.end() value_start_index = match.end()
if last_index != match.start(): if last_index != match.start():
prompt.append(input_string[last_index: match.start()].strip()) prompt.append(input_string[last_index: match.start()].strip())
last_index = value_start_index last_index = value_start_index
value_end_match = re.search(r"(?=\s+\w+:|$)", input_string[value_start_index:])
if value_end_match:
value_end_index = value_end_match.start() + value_start_index
else:
value_end_index = len(input_string)
value = input_string[value_start_index: value_end_index].strip()
if key == "ds": if key == "ds":
key = "denoising_strength" key = "denoising_strength"
if key == "ng": if key == "ng":
key = "negative_prompt" key = "negative_prompt"
if key == "cfg":
key = "cfg_scale"
if key in default_payload: if key in default_payload:
value_end_index = re.search(r"(?=\s+\w+:|$)", input_string[value_start_index:]).start()
value = input_string[value_start_index: value_start_index + value_end_index].strip()
payload[key] = value payload[key] = value
last_index += value_end_index elif key in ["xsr", "xsteps", "xds", "xcfg", "nl", "ks", "rs"]:
script_name = "x/y/z plot"
if slot_index < 3:
script_slot = slot_mapping[slot_index]
if key == "xsr":
script_args[script_slot[0]] = 7 # Enum value for xsr
script_args[script_slot[1]] = value
elif key == "xsteps":
script_args[script_slot[0]] = 4 # Enum value for xsteps
script_args[script_slot[1]] = value
elif key == "xds":
script_args[script_slot[0]] = 22 # Enum value for xds
script_args[script_slot[1]] = value
elif key == "xcfg":
script_args[script_slot[0]] = 6 # Enum value for CFG Scale
script_args[script_slot[1]] = value
slot_index += 1
elif key == "nl":
script_args[9] = False # Draw legend
elif key == "ks":
script_args[10] = True # Keep sub images
elif key == "rs":
script_args[11] = True # Set random seed to sub images
else: else:
prompt.append(f"{key}:") prompt.append(f"{key}:{value}")
payload["prompt"] = " ".join(prompt) last_index = value_end_index
payload["prompt"] = " ".join(prompt).strip()
if not payload["prompt"]: if not payload["prompt"]:
payload["prompt"] = input_string.strip() payload["prompt"] = input_string.strip()
return payload if script_name:
payload["script_name"] = script_name
payload["script_args"] = script_args
print(f"Generated payload: {payload}")
return payload, include_info
def create_caption(payload, user_name, user_id, info, include_info):
    """Build the Telegram caption for a generated image.

    The caption contains a clickable user mention, the seed (when it can be
    found in *info*), the prompt, and optionally the full payload; it is
    truncated to Telegram's 1024-character caption limit.
    """
    parts = [f"**[{user_name}](tg://user?id={user_id})**\n\n"]
    seed_match = re.search(r"Seed: (\d+)", info)
    if seed_match is None:
        print("Seed value not found in the info string.")
    else:
        parts.append(f"**{seed_match.group(1)}**\n")
    parts.append(f"**{payload['prompt']}**\n")
    if include_info:
        parts.append(f"\nFull Payload:\n`{payload}`\n")
    text = "".join(parts)
    if len(text) > 1024:
        text = text[:1021] + "..."
    return text
def call_api(api_endpoint, payload): def call_api(api_endpoint, payload):
"""Call the API with the provided payload."""
try: try:
response = requests.post(f'{SD_URL}/{api_endpoint}', json=payload) response = requests.post(f'{SD_URL}/{api_endpoint}', json=payload)
response.raise_for_status() response.raise_for_status()
return response.json() return response.json()
except requests.RequestException as e: except requests.RequestException as e:
print(f"API call failed: {e}") print(f"API call failed: {e}")
return None return {"error": str(e)}
def process_images(images, user_id, user_name): def process_images(images, user_id, user_name):
"""Process and save generated images."""
def generate_unique_name(): def generate_unique_name():
unique_id = str(uuid.uuid4())[:7] unique_id = str(uuid.uuid4())[:7]
return f"{user_name}-{unique_id}" date = datetime.now().strftime("%Y-%m-%d-%H-%M")
return f"{date}-{user_name}-{unique_id}"
word = generate_unique_name() word = generate_unique_name()
for i in images: for i in images:
image = Image.open(io.BytesIO(base64.b64decode(i.split(",", 1)[0]))) image = Image.open(io.BytesIO(base64.b64decode(i.split(",", 1)[0])))
png_payload = {"image": "data:image/png;base64," + i} png_payload = {"image": "data:image/png;base64," + i}
response2 = requests.post(f"{SD_URL}/sdapi/v1/png-info", json=png_payload) response2 = requests.post(f"{SD_URL}/sdapi/v1/png-info", json=png_payload)
response2.raise_for_status() response2.raise_for_status()
# Write response2 json next to the image
with open(f"{IMAGE_PATH}/{word}.json", "w") as json_file:
json.dump(response2.json(), json_file)
pnginfo = PngImagePlugin.PngInfo() pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text("parameters", response2.json().get("info")) pnginfo.add_text("parameters", response2.json().get("info"))
image.save(f"{IMAGE_PATH}/{word}.png", pnginfo=pnginfo) image.save(f"{IMAGE_PATH}/{word}.png", pnginfo=pnginfo)
# Save as JPG
jpg_path = f"{IMAGE_PATH}/{word}.jpg"
image.convert("RGB").save(jpg_path, "JPEG")
return word, response2.json().get("info") return word, response2.json().get("info")
@app.on_message(filters.command(["draw"])) @app.on_message(filters.command(["draw"]))
def draw(client, message): def draw(client, message):
"""Handle /draw command to generate images from text prompts."""
msgs = message.text.split(" ", 1) msgs = message.text.split(" ", 1)
if len(msgs) == 1: if len(msgs) == 1:
message.reply_text("Format :\n/draw < text to image >\nng: < negative (optional) >\nsteps: < steps value (1-70, optional) >") message.reply_text("Format :\n/draw < text to image >\nng: < negative (optional) >\nsteps: < steps value (1-70, optional) >")
return return
payload = parse_input(msgs[1]) payload, include_info = parse_input(msgs[1])
print(payload)
if "xds" in msgs[1].lower():
message.reply_text("`xds` key cannot be used in the `/draw` command. Use `/img` instead.")
return
K = message.reply_text("Please Wait 10-15 Seconds") K = message.reply_text("Please Wait 10-15 Seconds")
r = call_api('sdapi/v1/txt2img', payload) r = call_api('sdapi/v1/txt2img', payload)
if r: if r and "images" in r:
for i in r["images"]: for i in r["images"]:
word, info = process_images([i], message.from_user.id, message.from_user.first_name) word, info = process_images([i], message.from_user.id, message.from_user.first_name)
caption = create_caption(payload, message.from_user.first_name, message.from_user.id, info, include_info)
seed_value = info.split(", Seed: ")[1].split(",")[0] message.reply_photo(photo=f"{IMAGE_PATH}/{word}.jpg", caption=caption)
caption = f"**[{message.from_user.first_name}](tg://user?id={message.from_user.id})**\n\n"
# for key, value in payload.items():
# caption += f"{key.capitalize()} - **{value}**\n"
prompt = payload["prompt"]
caption += f"**{prompt}**\n"
caption += f"Seed - **{seed_value}**\n"
# Ensure caption is within the allowed length
if len(caption) > 1024:
caption = caption[:1021] + "..."
message.reply_photo(photo=f"{IMAGE_PATH}/{word}.png", caption=caption)
K.delete() K.delete()
else: else:
message.reply_text("Failed to generate image. Please try again later.") error_message = r.get("error", "Failed to generate image. Please try again later.")
message.reply_text(error_message)
K.delete() K.delete()
@app.on_message(filters.command(["img"])) @app.on_message(filters.command(["img"]))
def img2img(client, message): def img2img(client, message):
"""Handle /img command to generate images from existing images."""
if not message.reply_to_message or not message.reply_to_message.photo: if not message.reply_to_message or not message.reply_to_message.photo:
message.reply_text("reply to an image with \n`/img < prompt > ds:0-1.0`\n\nds stand for `Denoising_strength` parameter. Set that low (like 0.2) if you just want to slightly change things. defaults to 0.4") message.reply_text("Reply to an image with\n`/img < prompt > ds:0-1.0`\n\nds stands for `Denoising_strength` parameter. Set that low (like 0.2) if you just want to slightly change things. defaults to 0.35\n\nExample: `/img murder on the dance floor ds:0.2`")
return return
msgs = message.text.split(" ", 1) msgs = message.text.split(" ", 1)
print(msgs)
if len(msgs) == 1: if len(msgs) == 1:
message.reply_text("""Format :\n/img < prompt >\nforce: < 0.1-1.0, default 0.3 > message.reply_text("Don't FAIL in life")
""")
return return
payload = parse_input(" ".join(msgs[1:])) payload, include_info = parse_input(msgs[1])
print(payload)
photo = message.reply_to_message.photo photo = message.reply_to_message.photo
photo_file = app.download_media(photo) photo_file = app.download_media(photo)
init_image = encode_file_to_base64(photo_file) init_image = encode_file_to_base64(photo_file)
@@ -175,27 +316,24 @@ def img2img(client, message):
K = message.reply_text("Please Wait 10-15 Seconds") K = message.reply_text("Please Wait 10-15 Seconds")
r = call_api('sdapi/v1/img2img', payload) r = call_api('sdapi/v1/img2img', payload)
if r: if r and "images" in r:
for i in r["images"]: for i in r["images"]:
word, info = process_images([i], message.from_user.id, message.from_user.first_name) word, info = process_images([i], message.from_user.id, message.from_user.first_name)
caption = create_caption(payload, message.from_user.first_name, message.from_user.id, info, include_info)
caption = f"**[{message.from_user.first_name}](tg://user?id={message.from_user.id})**\n\n" message.reply_photo(photo=f"{IMAGE_PATH}/{word}.jpg", caption=caption)
prompt = payload["prompt"]
caption += f"**{prompt}**\n"
message.reply_photo(photo=f"{IMAGE_PATH}/{word}.png", caption=caption)
K.delete() K.delete()
else: else:
message.reply_text("Failed to process image. Please try again later.") error_message = r.get("error", "Failed to process image. Please try again later.")
message.reply_text(error_message)
K.delete() K.delete()
@app.on_message(filters.command(["getmodels"])) @app.on_message(filters.command(["getmodels"]))
async def get_models(client, message): async def get_models(client, message):
"""Handle /getmodels command to list available models."""
try: try:
response = requests.get(f"{SD_URL}/sdapi/v1/sd-models") response = requests.get(f"{SD_URL}/sdapi/v1/sd-models")
response.raise_for_status() response.raise_for_status()
models_json = response.json() models_json = response.json()
buttons = [ buttons = [
[InlineKeyboardButton(model["title"], callback_data=model["model_name"])] [InlineKeyboardButton(model["title"], callback_data=model["model_name"])]
for model in models_json for model in models_json
@@ -206,30 +344,81 @@ async def get_models(client, message):
@app.on_callback_query() @app.on_callback_query()
async def process_callback(client, callback_query): async def process_callback(client, callback_query):
"""Process model selection from callback queries."""
sd_model_checkpoint = callback_query.data sd_model_checkpoint = callback_query.data
options = {"sd_model_checkpoint": sd_model_checkpoint} options = {"sd_model_checkpoint": sd_model_checkpoint}
try: try:
response = requests.post(f"{SD_URL}/sdapi/v1/options", json=options) response = requests.post(f"{SD_URL}/sdapi/v1/options", json=options)
response.raise_for_status() response.raise_for_status()
update_negative_prompt(sd_model_checkpoint)
update_resolution(sd_model_checkpoint)
update_cfg_scale(sd_model_checkpoint)
await callback_query.message.reply_text(f"Checkpoint set to {sd_model_checkpoint}") await callback_query.message.reply_text(f"Checkpoint set to {sd_model_checkpoint}")
except requests.RequestException as e: except requests.RequestException as e:
await callback_query.message.reply_text(f"Failed to set checkpoint: {e}") await callback_query.message.reply_text(f"Failed to set checkpoint: {e}")
print(f"Error setting checkpoint: {e}")
# @app.on_message(filters.command(["start"], prefixes=["/", "!"])) @app.on_message(filters.command(["info_sd_bot"]))
# async def start(client, message): async def info(client, message):
# buttons = [[InlineKeyboardButton("Add to your group", url="https://t.me/gootmornbot?startgroup=true")]] """Provide information about the bot's commands and options."""
# await message.reply_text("Hello!\nAsk me to imagine anything\n\n/draw text to image", reply_markup=InlineKeyboardMarkup(buttons)) await message.reply_text("""
**Stable Diffusion Bot Commands and Options:**
user_interactions = {} 1. **/draw <prompt> [options]**
- Generates an image based on the provided text prompt.
- **Options:**
- `ng:<negative_prompt>` - Add a negative prompt to avoid specific features.
- `steps:<value>` - Number of steps for generation (1-70).
- `ds:<value>` - Denoising strength (0-1.0).
- `cfg:<value>` - CFG scale (1-30).
- `width:<value>` - Width of the generated image.
- `height:<value>` - Height of the generated image.
- `info:` - Include full payload information in the caption.
@app.on_message(filters.command(["user_stats"])) **Example:** `/draw beautiful sunset ng:ugly steps:30 ds:0.5 info:`
def user_stats(client, message):
stats = "User Interactions:\n\n"
for user_id, info in user_interactions.items():
stats += f"User: {info['username']} (ID: {user_id})\n"
stats += f"Commands: {', '.join(info['commands'])}\n\n"
message.reply_text(stats) 2. **/img <prompt> [options]**
- Generates an image based on an existing image and the provided text prompt.
- **Options:**
- `ds:<value>` - Denoising strength (0-1.0).
- `steps:<value>` - Number of steps for generation (1-70).
- `cfg:<value>` - CFG scale (1-30).
- `width:<value>` - Width of the generated image.
- `height:<value>` - Height of the generated image.
- `info:` - Include full payload information in the caption.
**Example:** Reply to an image with `/img modern art ds:0.2 info:`
3. **/getmodels**
- Retrieves and lists all available models for the user to select.
- User can then choose a model to set as the current model for image generation.
4. **/info_sd_bot**
- Provides detailed information about the bot's commands and options.
**Additional Options for Advanced Users:**
- **x/y/z plot options** for advanced generation:
- `xsr:<value>` - Search and replace text/emoji in the prompt.
- `xsteps:<value>` - Steps value for x/y/z plot.
- `xds:<value>` - Denoising strength for x/y/z plot.
- `xcfg:<value>` - CFG scale for x/y/z plot.
- `nl:` - No legend in x/y/z plot.
- `ks:` - Keep sub-images in x/y/z plot.
- `rs:` - Set random seed for sub-images in x/y/z plot.
**Notes:**
- Use lower step values (10-20) for large x/y/z plots to avoid long processing times.
- Use `info:` option to include full payload details in the caption of generated images for better troubleshooting and analysis.
**Example for Advanced Users:** `/draw beautiful landscape xsteps:10 xds:0.5 xcfg:7 nl: ks: rs: info:`
For the bot code visit: [Stable Diffusion Bot](https://git.telavivmakers.space/ro/stable-diffusion-telegram-bot)
For more details, visit the [Stable Diffusion Wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#xyz-plot).
Enjoy creating with Stable Diffusion Bot!
""", disable_web_page_preview=True)
app.run() app.run()