tami-p40 2024-05-26 09:00:32 +03:00
parent ca1c34b91b
commit 814a779b47

main.py

@@ -23,8 +23,7 @@ IMAGE_PATH = 'images'
# Ensure IMAGE_PATH directory exists
os.makedirs(IMAGE_PATH, exist_ok=True)
-# Model-specific emmbedings for negative prompts
-# see civit.ai model page for specific emmbedings recommnded for each model
+# Model-specific embeddings for negative prompts
model_negative_prompts = {
"Anything-Diffusion": "",
"Deliberate": "",
@@ -35,19 +34,15 @@ model_negative_prompts = {
"v1-5-pruned-emaonly": ""
}
def encode_file_to_base64(path):
with open(path, 'rb') as file:
return base64.b64encode(file.read()).decode('utf-8')
def decode_and_save_base64(base64_str, save_path):
with open(save_path, "wb") as file:
file.write(base64.b64decode(base64_str))
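For reference, the two helpers above form a simple base64 round-trip; a minimal sketch with hypothetical file names (not part of this commit):

```python
# Round-trip sketch: encode an existing PNG to base64, then write the bytes back out.
b64 = encode_file_to_base64("images/example.png")        # hypothetical input file
decode_and_save_base64(b64, "images/example_copy.png")   # produces an identical copy
```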
# Set default payload values
default_payload = {
"prompt": "",
"seed": -1, # Random seed
@@ -64,7 +59,7 @@ default_payload = {
"restore_faces": False,
"override_settings": {},
"override_settings_restore_afterwards": True,
}
def update_negative_prompt(model_name):
if model_name in model_negative_prompts:
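The body of `update_negative_prompt` is truncated by the hunk; a hedged sketch of what the lookup presumably does (the `negative_prompt` payload key is an assumption based on the standard WebUI API, not shown in this diff):

```python
# Hedged sketch only; the real body is not visible in this hunk.
def update_negative_prompt(model_name):
    if model_name in model_negative_prompts:
        # Apply the model-specific embedding tokens recommended on the model's
        # civitai page (see the comment above model_negative_prompts).
        default_payload["negative_prompt"] = model_negative_prompts[model_name]
```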
@@ -74,14 +69,12 @@ def update_negative_prompt(model_name):
def parse_input(input_string):
payload = default_payload.copy()
prompt = []
+include_info = "info:" in input_string
+input_string = input_string.replace("info:", "").strip()
matches = re.finditer(r"(\w+):", input_string)
last_index = 0
script_args = [0, "", [], 0, "", [], 0, "", [], True, False, False, False, False, False, False, 0, False]
script_name = None
@@ -145,15 +138,13 @@ def parse_input(input_string):
payload["script_name"] = script_name
payload["script_args"] = script_args
-return payload
+return payload, include_info
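With this change `parse_input` returns a tuple, so every caller has to unpack two values; a minimal usage sketch (the prompt text is made up):

```python
# Sketch: callers now unpack both the payload and the new include_info flag.
payload, include_info = parse_input("a cozy cabin ng:blurry steps:25 info:")
print(include_info)  # True, because "info:" appeared in the input and was stripped out
```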
-def create_caption(payload, user_name, user_id, info):
+def create_caption(payload, user_name, user_id, info, include_info):
caption = f"**[{user_name}](tg://user?id={user_id})**\n\n"
prompt = payload["prompt"]
print(payload["prompt"])
print(info)
# Steps: 3, Sampler: Euler, CFG scale: 7.0, Seed: 4094161400, Size: 512x512, Model hash: 15012c538f, Model: realisticVisionV60B1_v51VAE, Denoising strength: 0.35, Version: v1.8.0-1-g20cdc7c
# Define a regular expression pattern to match the seed value
seed_pattern = r"Seed: (\d+)"
@@ -164,13 +155,15 @@ def create_caption(payload, user_name, user_id, info):
# Check if a match was found and extract the seed value
if match:
seed_value = match.group(1)
print(f"Seed value: {seed_value}")
caption += f"**{seed_value}**\n"
else:
print("Seed value not found in the info string.")
caption += f"**{prompt}**\n"
+if include_info:
+caption += f"\nFull Payload:\n`{payload}`\n"
if len(caption) > 1024:
caption = caption[:1021] + "..."
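The 1024-character truncation above matches Telegram's photo-caption limit. As a worked example of the seed extraction, applying the same pattern to the sample info line quoted in the comment gives the numeric seed (sample string copied from that comment; this block is illustrative, not part of the commit):

```python
import re

sample_info = ("Steps: 3, Sampler: Euler, CFG scale: 7.0, Seed: 4094161400, "
               "Size: 512x512, Model hash: 15012c538f, Model: realisticVisionV60B1_v51VAE")
match = re.search(r"Seed: (\d+)", sample_info)
print(match.group(1) if match else "not found")  # -> 4094161400
```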
@@ -186,7 +179,6 @@ def call_api(api_endpoint, payload):
print(f"API call failed: {e}")
return None
def process_images(images, user_id, user_name):
def generate_unique_name():
unique_id = str(uuid.uuid4())[:7]
@@ -206,6 +198,7 @@ def process_images(images, user_id, user_name):
return word, response2.json().get("info")
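For context, the `info` string returned here typically comes from the WebUI's png-info endpoint; the sketch below is an assumption about what the elided `response2` request looks like, not code from this commit:

```python
import base64
import requests

# Hedged sketch: how a generation-info string is commonly fetched from the
# AUTOMATIC1111 API for a saved PNG. SD_URL is assumed from the surrounding code.
def fetch_generation_info(sd_url, png_path):
    with open(png_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    resp = requests.post(f"{sd_url}/sdapi/v1/png-info",
                         json={"image": "data:image/png;base64," + encoded})
    resp.raise_for_status()
    return resp.json().get("info")
```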
@app.on_message(filters.command(["draw"]))
def draw(client, message):
msgs = message.text.split(" ", 1)
@@ -213,8 +206,7 @@ def draw(client, message):
message.reply_text("Format :\n/draw < text to image >\nng: < negative (optional) >\nsteps: < steps value (1-70, optional) >")
return
-payload = parse_input(msgs[1])
-print(payload)
+payload, include_info = parse_input(msgs[1])
# Check if xds is used in the payload
if "xds" in msgs[1].lower():
@@ -227,14 +219,13 @@ def draw(client, message):
if r:
for i in r["images"]:
word, info = process_images([i], message.from_user.id, message.from_user.first_name)
-caption = create_caption(payload, message.from_user.first_name, message.from_user.id, info)
+caption = create_caption(payload, message.from_user.first_name, message.from_user.id, info, include_info)
message.reply_photo(photo=f"{IMAGE_PATH}/{word}.png", caption=caption)
K.delete()
else:
message.reply_text("Failed to generate image. Please try again later.")
K.delete()
@app.on_message(filters.command(["img"]))
def img2img(client, message):
if not message.reply_to_message or not message.reply_to_message.photo:
@@ -246,12 +237,8 @@ def img2img(client, message):
message.reply_text("dont FAIL in life")
return
-payload = parse_input(msgs[1])
-print(f"input:\n{payload}")
+payload, include_info = parse_input(msgs[1])
photo = message.reply_to_message.photo
-# prompt_from_reply = message.reply_to_message.
-# orginal_prompt = app.reply_to_message.message
-# print(orginal_prompt)
photo_file = app.download_media(photo)
init_image = encode_file_to_base64(photo_file)
os.remove(photo_file) # Clean up downloaded image file
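Before the API call the encoded source image still has to be attached to the payload; a hedged sketch using the standard WebUI img2img field (field name and endpoint path are assumptions, not shown in this hunk):

```python
# Hedged sketch: attach the base64-encoded source image for img2img.
payload["init_images"] = [init_image]
r = call_api("sdapi/v1/img2img", payload)  # endpoint path assumed from the WebUI API
```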
@@ -264,7 +251,7 @@ def img2img(client, message):
if r:
for i in r["images"]:
word, info = process_images([i], message.from_user.id, message.from_user.first_name)
-caption = create_caption(payload, message.from_user.first_name, message.from_user.id, info)
+caption = create_caption(payload, message.from_user.first_name, message.from_user.id, info, include_info)
message.reply_photo(photo=f"{IMAGE_PATH}/{word}.png", caption=caption)
K.delete()
else:
@@ -278,7 +265,6 @@ async def get_models(client, message):
response = requests.get(f"{SD_URL}/sdapi/v1/sd-models")
response.raise_for_status()
models_json = response.json()
-print(models_json)
buttons = [
[InlineKeyboardButton(model["title"], callback_data=model["model_name"])]
for model in models_json
@@ -287,7 +273,6 @@ async def get_models(client, message):
except requests.RequestException as e:
await message.reply_text(f"Failed to get models: {e}")
@app.on_callback_query()
async def process_callback(client, callback_query):
sd_model_checkpoint = callback_query.data
@@ -305,19 +290,58 @@ async def process_callback(client, callback_query):
await callback_query.message.reply_text(f"Failed to set checkpoint: {e}")
print(f"Error setting checkpoint: {e}")
@app.on_message(filters.command(["info_sd_bot"]))
async def info(client, message):
await message.reply_text("""
-now support for xyz scripts, see [sd wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#xyz-plot) !
-currently supported
-`xsr` - search replace text/emoji in the prompt, more info [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#prompt-sr)
-`xds` - denoise strength, only valid for img2img
-`xsteps` - steps
-**note** limit the overall `steps:` to lower value (10-20) for big xyz plots
-aside from that you can use the usual `ng`, `ds`, `cfg`, `steps` for single image generation.
+**Stable Diffusion Bot Commands and Options:**
+1. **/draw <prompt> [options]**
+- Generates an image based on the provided text prompt.
+- **Options:**
+- `ng:<negative_prompt>` - Add a negative prompt to avoid specific features.
+- `steps:<value>` - Number of steps for generation (1-70).
+- `ds:<value>` - Denoising strength (0-1.0).
+- `cfg:<value>` - CFG scale (1-30).
+- `info:` - Include full payload information in the caption.
+**Example:** `/draw beautiful sunset ng:ugly steps:30 ds:0.5 info:`
+2. **/img <prompt> [options]**
+- Generates an image based on an existing image and the provided text prompt.
+- **Options:**
+- `ds:<value>` - Denoising strength (0-1.0).
+- `steps:<value>` - Number of steps for generation (1-70).
+- `cfg:<value>` - CFG scale (1-30).
+- `info:` - Include full payload information in the caption.
+**Example:** Reply to an image with `/img modern art ds:0.2 info:`
+3. **/getmodels**
+- Retrieves and lists all available models for the user to select.
+- User can then choose a model to set as the current model for image generation.
+4. **/info_sd_bot**
+- Provides detailed information about the bot's commands and options.
+**Additional Options for Advanced Users:**
+- **x/y/z plot options** for advanced generation:
+- `xsr:<value>` - Search and replace text/emoji in the prompt.
+- `xsteps:<value>` - Steps value for x/y/z plot.
+- `xds:<value>` - Denoising strength for x/y/z plot.
+- `xcfg:<value>` - CFG scale for x/y/z plot.
+- `nl` - No legend in x/y/z plot.
+- `ks` - Keep sub-images in x/y/z plot.
+- `rs` - Set random seed for sub-images in x/y/z plot.
+**Notes:**
+- Use lower step values (10-20) for large x/y/z plots to avoid long processing times.
+- Use `info:` option to include full payload details in the caption of generated images for better troubleshooting and analysis.
+**Example for Advanced Users:** `/draw beautiful landscape xsteps:10 xds:0.5 xcfg:7 nl ks rs info:`
+For more details, visit the [Stable Diffusion Wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#xyz-plot).
+Enjoy creating with Stable Diffusion Bot!
""", disable_web_page_preview=True)
app.run()