From 0e38a1e046b6d9f5ab7a7a19813ff3a5b21df9b9 Mon Sep 17 00:00:00 2001
From: ddPn08
Date: Sun, 28 May 2023 23:03:02 +0900
Subject: [PATCH] Improve API system

---
 api/diffusion/pipelines/diffusers.py          | 165 ++++++++++++++++++
 api/events/__init__.py                        |  15 +-
 api/events/common.py                          |   3 +-
 api/events/generation.py                      |   9 +-
 api/plugin.py                                 |   5 +-
 launch.py                                     |   1 +
 lib/tensorrt/utilities.py                     |   1 -
 modules/acceleration/tensorrt/engine.py       |  21 ++-
 modules/components/gallery.py                 |   7 +-
 .../components/image_generation_options.py    |   2 +
 modules/diffusion/networks/__init__.py        |   2 +-
 modules/diffusion/pipelines/deepfloyd_if.py   |   2 +-
 modules/diffusion/pipelines/diffusers.py      |  69 ++++---
 modules/diffusion/pipelines/lpw.py            |   5 +-
 modules/diffusion/pipelines/tensorrt.py       |   2 -
 modules/diffusion/upscalers/multidiffusion.py |  73 +++-----
 modules/diffusion/upscalers/samplers.py       |   5 +-
 modules/javascripts.py                        |   1 +
 modules/model.py                              |  10 +-
 modules/tabs/deepfloyd_if.py                  |   3 +-
 modules/tabs/generate.py                      |   3 +-
 modules/tabs/images_browser.py                |   7 +-
 modules/ui.py                                 |   2 +-
 23 files changed, 281 insertions(+), 132 deletions(-)
 create mode 100644 api/diffusion/pipelines/diffusers.py

diff --git a/api/diffusion/pipelines/diffusers.py b/api/diffusion/pipelines/diffusers.py
new file mode 100644
index 00000000..316d5a32
--- /dev/null
+++ b/api/diffusion/pipelines/diffusers.py
@@ -0,0 +1,165 @@
+from dataclasses import dataclass
+import inspect
+from typing import *
+import numpy as np
+
+import torch
+from diffusers import (
+    AutoencoderKL,
+    DDPMScheduler,
+    UNet2DConditionModel,
+)
+from transformers import CLIPTextModel, CLIPTokenizer
+import PIL.Image
+
+from api.models.diffusion import ImageGenerationOptions
+from api.plugin import get_plugin_id
+
+
+@dataclass
+class PipeSession:
+    plugin_data: Dict[str, Any]
+    opts: ImageGenerationOptions
+
+
+class DiffusersPipelineModel:
+    __mode__ = "diffusers"
+
+    @classmethod
+    def from_pretrained(
+        cls,
+        pretrained_model_id: str,
+        use_auth_token: Optional[str] = None,
+        torch_dtype: torch.dtype = torch.float32,
+        cache_dir: Optional[str] = None,
+        device: Optional[torch.device] = None,
+        subfolder: Optional[str] = None,
+    ):
+        pass
+
+    def __init__(
+        self,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModel,
+        tokenizer: CLIPTokenizer,
+        unet: UNet2DConditionModel,
+        scheduler: DDPMScheduler,
+        device: torch.device = torch.device("cpu"),
+        dtype: torch.dtype = torch.float32,
+    ):
+        self.vae: AutoencoderKL
+        self.text_encoder: CLIPTextModel
+        self.tokenizer: CLIPTokenizer
+        self.unet: UNet2DConditionModel
+        self.scheduler: DDPMScheduler
+        self.device: torch.device
+        self.dtype: torch.dtype
+        self.session: PipeSession
+        pass
+
+    def get_plugin_data(self):
+        id = get_plugin_id(inspect.stack()[1])
+        return self.session.plugin_data[id]
+
+    def set_plugin_data(self, data):
+        id = get_plugin_id(inspect.stack()[1])
+        self.session.plugin_data[id] = data
+
+    def to(self, device: torch.device = None, dtype: torch.dtype = None):
+        pass
+
+    def enterers(self):
+        pass
+
+    def load_resources(
+        self,
+        image_height: int,
+        image_width: int,
+        batch_size: int,
+        num_inference_steps: int,
+    ):
+        pass
+
+    def get_timesteps(self, num_inference_steps: int, strength: Optional[float]):
+        pass
+
+    def prepare_extra_step_kwargs(self, generator: torch.Generator, eta):
+        pass
+
+    def preprocess_image(self, image: PIL.Image.Image, height: int, width: int):
+        pass
+
+    def _encode_prompt(
+        self,
+        prompt: Union[str, List[str]],
+ num_images_per_prompt: int, + do_classifier_free_guidance: bool, + negative_prompt: Optional[Union[str, List[str]]] = "", + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + ): + pass + + def prepare_latents( + self, + vae_scale_factor: int, + unet_in_channels: int, + image: Optional[torch.Tensor], + timestep: torch.Tensor, + batch_size: int, + height: int, + width: int, + dtype: torch.dtype, + generator: torch.Generator, + latents: torch.Tensor = None, + ): + pass + + def denoise_latent( + self, + latents: torch.Tensor, + timesteps: torch.Tensor, + num_inference_steps: int, + guidance_scale: float, + do_classifier_free_guidance: bool, + prompt_embeds: torch.Tensor, + extra_step_kwargs: Dict[str, Any], + callback: Optional[Callable], + callback_steps: int, + cross_attention_kwargs: Dict[str, Any], + ): + pass + + def decode_latents(self, latents: torch.Tensor): + pass + + def decode_images(self, image: np.ndarray): + pass + + def create_output(self, latents: torch.Tensor, output_type: str, return_dict: bool): + pass + + def __call__( + self, + opts: ImageGenerationOptions, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + eta: float = 0.0, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + plugin_data: Optional[Dict[str, Any]] = {}, + ): + pass + + def enable_xformers_memory_efficient_attention( + self, attention_op: Optional[Callable] = None + ): + pass + + def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): + pass diff --git a/api/events/__init__.py b/api/events/__init__.py index 573284c7..6560db5d 100644 --- a/api/events/__init__.py +++ b/api/events/__init__.py @@ -4,6 +4,8 @@ handlers: Dict[str, List[Callable]] = {} +T = TypeVar("T", bound="BaseEvent") + class BaseEvent: __event_name__: ClassVar[str] = "" @@ -15,7 +17,11 @@ def register(cls, handler): handlers[cls].append(handler) @classmethod - def call_event(cls, event=None): + def call_event(cls: Type[T], *args, **kwargs) -> T: + if len(args) == 1 and type(args[0]) == cls: + event = args[0] + else: + event = cls(*args, **kwargs) if event is None: event = cls() if not isinstance(event, BaseEvent): @@ -33,6 +39,13 @@ def call_event(cls, event=None): return event + def __call__(self): + fields = self.__dataclass_fields__ + results = [] + for field in fields: + results.append(getattr(self, field)) + return results + @dataclass class CancellableEvent(BaseEvent): diff --git a/api/events/common.py b/api/events/common.py index 44fd4783..a2d35d9f 100644 --- a/api/events/common.py +++ b/api/events/common.py @@ -1,5 +1,6 @@ from dataclasses import dataclass from typing import * +from fastapi import FastAPI from gradio import Blocks @@ -13,7 +14,7 @@ class PreAppLaunchEvent(BaseEvent): @dataclass class PostAppLaunchEvent(BaseEvent): - pass + app: FastAPI @dataclass diff --git a/api/events/generation.py b/api/events/generation.py index 90947220..b3d11b9a 100644 --- a/api/events/generation.py +++ b/api/events/generation.py @@ -1,26 +1,29 @@ from dataclasses import dataclass, field from typing import * +from typing import Any import torch +from api.diffusion.pipelines.diffusers import 
DiffusersPipelineModel + from . import BaseEvent, SkippableEvent @dataclass class LoadResourceEvent(BaseEvent): - pipe: Any + pipe: DiffusersPipelineModel @dataclass class PromptTokenizingEvent(BaseEvent): - pipe: Any + pipe: DiffusersPipelineModel text_tokens: List text_weights: List @dataclass class UNetDenoisingEvent(SkippableEvent): - pipe: Any + pipe: DiffusersPipelineModel latent_model_input: torch.Tensor step: int diff --git a/api/plugin.py b/api/plugin.py index 868806fc..7853f049 100644 --- a/api/plugin.py +++ b/api/plugin.py @@ -1,7 +1,8 @@ import inspect -def get_plugin_id(): - frm = inspect.stack()[1] +def get_plugin_id(frm=None): + if frm is None: + frm = inspect.stack()[1] mod = inspect.getmodule(frm[0]) return mod.__name__.split(".")[1] diff --git a/launch.py b/launch.py index 97ba40cf..0894c559 100644 --- a/launch.py +++ b/launch.py @@ -181,6 +181,7 @@ def prepare_environment(): sys.argv, reinstall_torch = extract_arg(sys.argv, "--reinstall-torch") sys.argv, reinstall_xformers = extract_arg(sys.argv, "--reinstall-xformers") + sys.argv, reinstall_tensorrt = extract_arg(sys.argv, "--reinstall-tensorrt") tensorrt = "--tensorrt" in sys.argv if reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): diff --git a/lib/tensorrt/utilities.py b/lib/tensorrt/utilities.py index 8fd0787e..64af7a00 100644 --- a/lib/tensorrt/utilities.py +++ b/lib/tensorrt/utilities.py @@ -340,7 +340,6 @@ def create_models( use_auth_token: Optional[str], device: Union[str, torch.device], max_batch_size: int, - hf_cache_dir: Optional[str] = None, unet_in_channels: int = 4, embedding_dim: int = 768, ): diff --git a/modules/acceleration/tensorrt/engine.py b/modules/acceleration/tensorrt/engine.py index 8a5e352d..8984e94f 100644 --- a/modules/acceleration/tensorrt/engine.py +++ b/modules/acceleration/tensorrt/engine.py @@ -1,8 +1,8 @@ import gc import os -import torch import tensorrt +import torch from api.models.tensorrt import BuildEngineOptions, TensorRTEngineData from lib.tensorrt.utilities import ( @@ -19,7 +19,6 @@ load_vae_encoder, ) from modules.logger import logger -from modules.shared import hf_diffusers_cache_dir def create_onnx_path(name, onnx_dir, opt=True): @@ -35,15 +34,15 @@ def __init__(self, opts: BuildEngineOptions): unet = load_unet(self.model.model_id) text_encoder = load_text_encoder(self.model.model_id) - self.models = create_models( - model_id=self.model.model_id, - device=torch.device("cuda"), - use_auth_token=opts.hf_token, - max_batch_size=opts.max_batch_size, - hf_cache_dir=hf_diffusers_cache_dir(), - unet_in_channels=unet.config.in_channels, - embedding_dim=text_encoder.config.hidden_size, - ) + self.model_args = { + "model_id": self.model.model_id, + "device": torch.device("cuda"), + "use_auth_token": opts.hf_token, + "max_batch_size": opts.max_batch_size, + "unet_in_channels": unet.config.in_channels, + "embedding_dim": text_encoder.config.hidden_size, + } + self.models = create_models(**self.model_args) if not opts.full_acceleration: self.models = { "unet": self.models["unet"], diff --git a/modules/components/gallery.py b/modules/components/gallery.py index 1d0db2f8..14dddfab 100644 --- a/modules/components/gallery.py +++ b/modules/components/gallery.py @@ -1,13 +1,12 @@ +import json +import os from random import randint from typing import * + import gradio as gr import gradio.blocks import gradio.utils - - from PIL import Image -import json -import os def outputs_gallery_info_ui(elem_classes=[], **kwargs): diff --git 
a/modules/components/image_generation_options.py b/modules/components/image_generation_options.py index 4317ab5f..6540f7ff 100644 --- a/modules/components/image_generation_options.py +++ b/modules/components/image_generation_options.py @@ -11,12 +11,14 @@ def prompt_ui(): lines=3, placeholder="Prompt", show_label=False, + elem_classes=["prompt-textbox"], ) negative_prompt_textbox = gr.TextArea( "", lines=3, placeholder="Negative Prompt", show_label=False, + elem_classes=["negative-prompt-textbox"], ) return prompt_textbox, negative_prompt_textbox diff --git a/modules/diffusion/networks/__init__.py b/modules/diffusion/networks/__init__.py index 3f342a3b..8f8aaa71 100644 --- a/modules/diffusion/networks/__init__.py +++ b/modules/diffusion/networks/__init__.py @@ -52,7 +52,7 @@ def restore_networks(*modules: torch.nn.Module): def load_network_modules(e: LoadResourceEvent): global latest_networks - opts: ImageGenerationOptions = e.pipe.opts + opts: ImageGenerationOptions = e.pipe.session.opts positive_networks, opts.prompt = get_networks_from_prompt(opts.prompt) diff --git a/modules/diffusion/pipelines/deepfloyd_if.py b/modules/diffusion/pipelines/deepfloyd_if.py index 080aa1f9..46f99dc8 100644 --- a/modules/diffusion/pipelines/deepfloyd_if.py +++ b/modules/diffusion/pipelines/deepfloyd_if.py @@ -8,7 +8,7 @@ from transformers import T5EncoderModel from modules import config -from modules.shared import hf_diffusers_cache_dir, hf_transformers_cache_dir, get_device +from modules.shared import get_device, hf_diffusers_cache_dir, hf_transformers_cache_dir class IFDiffusionPipeline: diff --git a/modules/diffusion/pipelines/diffusers.py b/modules/diffusion/pipelines/diffusers.py index 20cddcf2..83575d7f 100644 --- a/modules/diffusion/pipelines/diffusers.py +++ b/modules/diffusion/pipelines/diffusers.py @@ -1,6 +1,7 @@ import gc import inspect import os +from dataclasses import dataclass from typing import * import numpy as np @@ -20,6 +21,7 @@ from tqdm import tqdm from transformers import CLIPTextModel, CLIPTokenizer +from api.diffusion.pipelines.diffusers import DiffusersPipelineModel from api.events.generation import LoadResourceEvent, UNetDenoisingEvent from api.models.diffusion import ImageGenerationOptions from modules.diffusion.pipelines.lpw import LongPromptWeightingPipeline @@ -27,24 +29,14 @@ from modules.shared import ROOT_DIR -class DiffusersPipeline: - __mode__ = "diffusers" +@dataclass +class PipeSession: + plugin_data: Dict[str, Any] + opts: ImageGenerationOptions - @classmethod - def load_unet(cls, model_id: str): - ckpt_path = os.path.join(ROOT_DIR, "models", "checkpoints", model_id) - if os.path.exists(ckpt_path): - temporary_pipe = ( - convert_from_ckpt.download_from_original_stable_diffusion_ckpt( - ckpt_path, - from_safetensors=model_id.endswith(".safetensors"), - load_safety_checker=False, - ) - ) - unet = temporary_pipe.unet - else: - unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet") - return unet + +class DiffusersPipeline(DiffusersPipelineModel): + __mode__ = "diffusers" @classmethod def from_pretrained( @@ -121,9 +113,8 @@ def __init__( self.lpw = LongPromptWeightingPipeline(self) self.multidiff = None - self.plugin_data = None - self.opts = None self.stage_1st = None + self.session = None def to(self, device: torch.device = None, dtype: torch.dtype = None): if device is None: @@ -157,7 +148,7 @@ def load_resources( ): num_inference_steps = opts.num_inference_steps self.scheduler.set_timesteps(num_inference_steps, device=self.device) - 
LoadResourceEvent.call_event(LoadResourceEvent(pipe=self)) + LoadResourceEvent.call_event(self) def get_timesteps(self, num_inference_steps: int, strength: Optional[float]): if strength is None: @@ -222,7 +213,7 @@ def prepare_latents( dtype: torch.dtype, generator: torch.Generator, latents: torch.Tensor = None, - ) -> torch.Tensor: + ): if image is None: shape = ( batch_size, @@ -280,21 +271,20 @@ def denoise_latent( latent_model_input, timestep ) - event = UNetDenoisingEvent( - pipe=self, - latent_model_input=latent_model_input, - timestep=timestep, - step=step, - latents=latents, - timesteps=timesteps, - do_classifier_free_guidance=do_classifier_free_guidance, - prompt_embeds=prompt_embeds, - extra_step_kwargs=extra_step_kwargs, - callback=callback, - callback_steps=callback_steps, - cross_attention_kwargs=cross_attention_kwargs, + event = UNetDenoisingEvent.call_event( + self, + latent_model_input, + step, + timestep, + latents, + timesteps, + do_classifier_free_guidance, + prompt_embeds, + extra_step_kwargs, + callback, + callback_steps, + cross_attention_kwargs, ) - UNetDenoisingEvent.call_event(event) latents = event.latents @@ -382,8 +372,10 @@ def __call__( cross_attention_kwargs: Optional[Dict[str, Any]] = None, plugin_data: Optional[Dict[str, Any]] = {}, ): - self.plugin_data = plugin_data - self.opts = opts + self.session = PipeSession( + plugin_data=plugin_data, + opts=opts, + ) # Hires.fix if opts.hiresfix: @@ -512,8 +504,7 @@ def __call__( self.stage_1st = None return outputs - self.plugin_data = None - self.opts = None + self.session = None return outputs diff --git a/modules/diffusion/pipelines/lpw.py b/modules/diffusion/pipelines/lpw.py index cfcbeb6b..8ac572a0 100644 --- a/modules/diffusion/pipelines/lpw.py +++ b/modules/diffusion/pipelines/lpw.py @@ -178,8 +178,9 @@ def get_prompts_with_weights(self, prompt: List[str], max_length: int): truncated = True break - event = PromptTokenizingEvent(self.pipe, text_tokens, text_weights) - PromptTokenizingEvent.call_event(event) + event = PromptTokenizingEvent.call_event( + self.pipe, text_tokens, text_weights + ) text_tokens = event.text_tokens text_weights = event.text_weights diff --git a/modules/diffusion/pipelines/tensorrt.py b/modules/diffusion/pipelines/tensorrt.py index 2ba19b19..2f788b35 100644 --- a/modules/diffusion/pipelines/tensorrt.py +++ b/modules/diffusion/pipelines/tensorrt.py @@ -12,7 +12,6 @@ from transformers import CLIPTextModel, CLIPTokenizer from api.models.diffusion import ImageGenerationOptions - from lib.tensorrt.engine import ( AutoencoderKLEngine, CLIPTextModelEngine, @@ -98,7 +97,6 @@ def from_pretrained( use_auth_token=use_auth_token, device=device, max_batch_size=max_batch_size, - hf_cache_dir=hf_cache_dir, embedding_dim=embedding_dim, ) diff --git a/modules/diffusion/upscalers/multidiffusion.py b/modules/diffusion/upscalers/multidiffusion.py index 63ca5473..a52cee50 100644 --- a/modules/diffusion/upscalers/multidiffusion.py +++ b/modules/diffusion/upscalers/multidiffusion.py @@ -5,14 +5,13 @@ from diffusers import ( AutoencoderKL, DDPMScheduler, - UNet2DConditionModel, EulerAncestralDiscreteScheduler, KDPM2AncestralDiscreteScheduler, + UNet2DConditionModel, ) from tqdm import tqdm from transformers import CLIPTextModel, CLIPTokenizer -from api.events.generation import UNetDenoisingEvent from .samplers import EulerAncestralSampler, KDPM2AncestralSampler @@ -26,8 +25,6 @@ def __init__( self.tokenizer: CLIPTokenizer = pipe.tokenizer self.unet: UNet2DConditionModel = pipe.unet self.scheduler: 
DDPMScheduler = pipe.scheduler - self.plugin_data = pipe.plugin_data - self.opts = pipe.opts self.ancestral = False def hijack_ancestral_scheduler(self) -> bool: @@ -103,55 +100,33 @@ def views_denoise_latent( latent_model_input, timestep ) - event = UNetDenoisingEvent( - pipe=self, - latent_model_input=latent_model_input, - timestep=timestep, - step=step, - latents=latents_for_view, - timesteps=timesteps, - do_classifier_free_guidance=do_classifier_free_guidance, - prompt_embeds=prompt_embeds, - extra_step_kwargs=extra_step_kwargs, - callback=callback, - callback_steps=callback_steps, + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + timestep, + encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, - ) - UNetDenoisingEvent.call_event(event) - - latents_for_view = event.latents - - if not event.skip: - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - timestep, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - **event.unet_additional_kwargs, - ).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * ( - noise_pred_text - noise_pred_uncond - ) - - # compute the previous noisy sample x_t -> x_t-1 - scheduler_output = self.scheduler.step( - model_output=noise_pred, - timestep=timestep, - sample=latents_for_view, - **extra_step_kwargs, - ) - latents_view_denoised = scheduler_output.prev_sample - sigma_up = scheduler_output.sigma_up if self.ancestral else None + ).sample - views_scheduler_status[j] = copy.deepcopy( - self.scheduler.__dict__ + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * ( + noise_pred_text - noise_pred_uncond ) + # compute the previous noisy sample x_t -> x_t-1 + scheduler_output = self.scheduler.step( + model_output=noise_pred, + timestep=timestep, + sample=latents_for_view, + **extra_step_kwargs, + ) + latents_view_denoised = scheduler_output.prev_sample + sigma_up = scheduler_output.sigma_up if self.ancestral else None + + views_scheduler_status[j] = copy.deepcopy(self.scheduler.__dict__) + value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised count[:, :, h_start:h_end, w_start:w_end] += 1 diff --git a/modules/diffusion/upscalers/samplers.py b/modules/diffusion/upscalers/samplers.py index 317fb143..23c1d6fc 100644 --- a/modules/diffusion/upscalers/samplers.py +++ b/modules/diffusion/upscalers/samplers.py @@ -4,11 +4,8 @@ from typing import * import torch +from diffusers import EulerAncestralDiscreteScheduler, KDPM2AncestralDiscreteScheduler from torch import FloatTensor -from diffusers import ( - EulerAncestralDiscreteScheduler, - KDPM2AncestralDiscreteScheduler, -) @dataclass diff --git a/modules/javascripts.py b/modules/javascripts.py index 96ecb804..ede33d77 100644 --- a/modules/javascripts.py +++ b/modules/javascripts.py @@ -1,4 +1,5 @@ import os + from fastapi import FastAPI from fastapi.responses import FileResponse diff --git a/modules/model.py b/modules/model.py index 00994d55..31c7aaf3 100644 --- a/modules/model.py +++ b/modules/model.py @@ -12,9 +12,13 @@ from . 
import config, utils from .images import save_image -from .shared import hf_diffusers_cache_dir, get_device +from .shared import get_device, hf_diffusers_cache_dir ModelMode = Literal["diffusers", "tensorrt"] +PrecisionMap = { + "fp32": torch.float32, + "fp16": torch.float16, +} class DiffusersModel: @@ -81,7 +85,9 @@ def activate(self): if self.activated: return device = get_device() - torch_dtype = torch.float16 if config.get("fp16") else torch.float32 + + precision = config.get("precision") or "fp32" + torch_dtype = PrecisionMap[precision] if self.mode == "diffusers": from .diffusion.pipelines.diffusers import DiffusersPipeline diff --git a/modules/tabs/deepfloyd_if.py b/modules/tabs/deepfloyd_if.py index 34ca2d92..75d9b858 100644 --- a/modules/tabs/deepfloyd_if.py +++ b/modules/tabs/deepfloyd_if.py @@ -5,8 +5,7 @@ import torch from modules import model_manager -from modules.components import image_generation_options -from modules.components import gallery +from modules.components import gallery, image_generation_options from modules.diffusion.pipelines.deepfloyd_if import IFDiffusionPipeline from modules.ui import Tab diff --git a/modules/tabs/generate.py b/modules/tabs/generate.py index 89f95f4c..d544e2f3 100644 --- a/modules/tabs/generate.py +++ b/modules/tabs/generate.py @@ -4,8 +4,7 @@ from api.models.diffusion import ImageGenerationOptions from modules import model_manager -from modules.components import image_generation_options -from modules.components import gallery +from modules.components import gallery, image_generation_options from modules.ui import Tab diff --git a/modules/tabs/images_browser.py b/modules/tabs/images_browser.py index b45adfb0..882c3cf1 100644 --- a/modules/tabs/images_browser.py +++ b/modules/tabs/images_browser.py @@ -1,13 +1,12 @@ +import glob +import os from typing import * - import gradio as gr -import glob -import os +from modules import config from modules.components import gallery from modules.ui import Tab -from modules import config class ImagesBrowser(Tab): diff --git a/modules/ui.py b/modules/ui.py index 6ba90f76..d07d6d23 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -103,7 +103,7 @@ def webpath(fn): def javascript_html(): script_js = os.path.join(ROOT_DIR, "script.js") - head = f'\n' + head = f'\n' return head
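
Usage sketch: the snippet below shows how a plugin might consume the revised
event API from this patch. The plugin module path, handler names, and the
/my-plugin/ping route are hypothetical; the event classes, their fields, and
the register / call_event / get_plugin_data / set_plugin_data entry points are
the ones defined in the diff above.

    # plugins/my_plugin/main.py (hypothetical path -- get_plugin_id() derives
    # the plugin id from the second segment of the caller's module name)
    from api.events.common import PostAppLaunchEvent
    from api.events.generation import LoadResourceEvent, UNetDenoisingEvent


    def on_app_launch(e: PostAppLaunchEvent):
        # PostAppLaunchEvent now carries the FastAPI app, so a plugin can
        # mount its own routes once the app is up.
        @e.app.get("/my-plugin/ping")
        def ping():
            return {"ok": True}


    def on_load_resource(e: LoadResourceEvent):
        # Per-plugin state lives on the pipe session and is keyed
        # automatically by the calling plugin's id.
        e.pipe.set_plugin_data({"denoise_calls": 0})


    def on_denoise(e: UNetDenoisingEvent):
        data = e.pipe.get_plugin_data()
        data["denoise_calls"] += 1
        # UNetDenoisingEvent is a SkippableEvent: a handler that sets
        # e.skip = True bypasses the built-in UNet call for this step,
        # and the pipeline reads the result back from e.latents.


    PostAppLaunchEvent.register(on_app_launch)
    LoadResourceEvent.register(on_load_resource)
    UNetDenoisingEvent.register(on_denoise)

Note that call_event() now accepts either an already-constructed event
instance or the event's constructor arguments (as the pipeline code above
does), and returns the handler-mutated event. Model precision is likewise now
selected via the "precision" config key ("fp32" or "fp16") instead of the old
boolean "fp16" flag (see modules/model.py).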