From ca1c34b91bfff740f3c60c2d746b05b085053b30 Mon Sep 17 00:00:00 2001
From: tami-p40
Date: Sun, 26 May 2024 08:49:27 +0300
Subject: [PATCH] negative embeddings

---
 main.py | 52 +++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 45 insertions(+), 7 deletions(-)

diff --git a/main.py b/main.py
index 3e61f4e..7e00170 100644
--- a/main.py
+++ b/main.py
@@ -23,25 +23,35 @@ IMAGE_PATH = 'images'
 # Ensure IMAGE_PATH directory exists
 os.makedirs(IMAGE_PATH, exist_ok=True)
 
-def timestamp():
-    return datetime.now().strftime("%Y%m%d-%H%M%S")
+# Model-specific embeddings for negative prompts
+# see the civit.ai model page for the embeddings recommended for each model
+model_negative_prompts = {
+    "Anything-Diffusion": "",
+    "Deliberate": "",
+    "Dreamshaper": "",
+    "DreamShaperXL_Lightning": "",
+    "icbinp": "",
+    "realisticVisionV60B1_v51VAE": "realisticvision-negative-embedding",
+    "v1-5-pruned-emaonly": ""
+}
+
 def encode_file_to_base64(path):
     with open(path, 'rb') as file:
         return base64.b64encode(file.read()).decode('utf-8')
 
+
 def decode_and_save_base64(base64_str, save_path):
     with open(save_path, "wb") as file:
         file.write(base64.b64decode(base64_str))
 
-def parse_input(input_string):
     # Set default payload values
-    default_payload = {
+default_payload = {
     "prompt": "",
     "seed": -1,  # Random seed
-    "negative_prompt": "ugly, bad face, distorted",
+    "negative_prompt": "extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs",
     "enable_hr": False,
     "Sampler": "DPM++ SDE Karras",
     "denoising_strength": 0.35,
@@ -55,6 +65,13 @@ def parse_input(input_string):
     "override_settings": {},
     "override_settings_restore_afterwards": True,
 }
+
+def update_negative_prompt(model_name):
+    if model_name in model_negative_prompts:
+        suffix = model_negative_prompts[model_name]
+        default_payload["negative_prompt"] += f", {suffix}"
+
+def parse_input(input_string):
     payload = default_payload.copy()
     prompt = []
 
@@ -129,6 +146,8 @@ def parse_input(input_string):
     payload["script_args"] = script_args
 
     return payload
+
+
 def create_caption(payload, user_name, user_id, info):
     caption = f"**[{user_name}](tg://user?id={user_id})**\n\n"
     prompt = payload["prompt"]
@@ -157,6 +176,7 @@ def create_caption(payload, user_name, user_id, info):
 
     return caption
 
+
 def call_api(api_endpoint, payload):
     try:
         response = requests.post(f'{SD_URL}/{api_endpoint}', json=payload)
@@ -166,6 +186,7 @@ def call_api(api_endpoint, payload):
         print(f"API call failed: {e}")
         return None
 
+
 def process_images(images, user_id, user_name):
     def generate_unique_name():
         unique_id = str(uuid.uuid4())[:7]
@@ -195,6 +216,11 @@ def draw(client, message):
     payload = parse_input(msgs[1])
     print(payload)
 
+    # Check if xds is used in the payload
+    if "xds" in msgs[1].lower():
+        message.reply_text("`xds` key cannot be used in the `/draw` command. Use `/img` instead.")
+        return
+
     K = message.reply_text("Please Wait 10-15 Seconds")
 
     r = call_api('sdapi/v1/txt2img', payload)
@@ -208,6 +234,7 @@ def draw(client, message):
         message.reply_text("Failed to generate image. Please try again later.")
         K.delete()
 
+
 @app.on_message(filters.command(["img"]))
 def img2img(client, message):
     if not message.reply_to_message or not message.reply_to_message.photo:
@@ -244,13 +271,14 @@ def img2img(client, message):
         message.reply_text("Failed to process image. Please try again later.")
         K.delete()
 
+
 @app.on_message(filters.command(["getmodels"]))
 async def get_models(client, message):
     try:
         response = requests.get(f"{SD_URL}/sdapi/v1/sd-models")
         response.raise_for_status()
         models_json = response.json()
-
+        print(models_json)
         buttons = [
             [InlineKeyboardButton(model["title"], callback_data=model["model_name"])]
             for model in models_json
@@ -259,6 +287,7 @@
     except requests.RequestException as e:
         await message.reply_text(f"Failed to get models: {e}")
 
+
 @app.on_callback_query()
 async def process_callback(client, callback_query):
     sd_model_checkpoint = callback_query.data
@@ -267,9 +296,15 @@
     try:
         response = requests.post(f"{SD_URL}/sdapi/v1/options", json=options)
         response.raise_for_status()
+
+        # Update the negative prompt based on the selected model
+        update_negative_prompt(sd_model_checkpoint)
+
         await callback_query.message.reply_text(f"Checkpoint set to {sd_model_checkpoint}")
     except requests.RequestException as e:
         await callback_query.message.reply_text(f"Failed to set checkpoint: {e}")
+        print(f"Error setting checkpoint: {e}")
+
 
 @app.on_message(filters.command(["info_sd_bot"]))
 async def info(client, message):
@@ -280,6 +315,9 @@
 currently supported
 `xds` - denoise strength, only valid for img2img
 `xsteps` - steps
 **note** limit the overall `steps:` to lower value (10-20) for big xyz plots
-""")
+
+Aside from that, you can use the usual `ng`, `ds`, `cfg`, `steps` for single image generation.
+""", disable_web_page_preview=True)
+
 app.run()
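
Note: after this patch, selecting a checkpoint via /getmodels ends up calling update_negative_prompt(), which appends that model's embedding token to the shared default_payload["negative_prompt"]; every later /draw then inherits it through default_payload.copy() in parse_input(). Below is a minimal standalone sketch of that flow, not part of main.py; the names mirror the patch, and the shortened prompt values are illustrative only.

# Standalone sketch of the negative-prompt flow introduced by this patch.
# Names mirror the patch; the prompt strings are shortened for illustration.
model_negative_prompts = {
    "realisticVisionV60B1_v51VAE": "realisticvision-negative-embedding",
    "v1-5-pruned-emaonly": "",
}

default_payload = {"prompt": "", "negative_prompt": "deformed, ugly, blurry"}

def update_negative_prompt(model_name):
    # Same logic as the patch: append the model-specific embedding token.
    if model_name in model_negative_prompts:
        suffix = model_negative_prompts[model_name]
        default_payload["negative_prompt"] += f", {suffix}"

# Simulate the /getmodels callback selecting a checkpoint, then a /draw call.
update_negative_prompt("realisticVisionV60B1_v51VAE")
payload = default_payload.copy()  # what parse_input() starts from after the change
print(payload["negative_prompt"])
# deformed, ugly, blurry, realisticvision-negative-embedding

Because the suffix is appended to the module-level dict, repeated checkpoint switches keep appending to the same negative-prompt string rather than starting from the original default.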