Commit 2b91721 · verified · Parent(s): 6b6e905
diffusers-benchmarking-bot committed: Upload folder using huggingface_hub

main/lpw_stable_diffusion_xl.py CHANGED
@@ -29,7 +29,6 @@ from diffusers.loaders import (
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
@@ -1328,18 +1327,8 @@ class SDXLLongPromptWeightingPipeline(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (AttnProcessor2_0, XFormersAttnProcessor),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
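Every file in this commit makes the same change: `upcast_vae` drops its attention-processor special-casing and becomes a deprecated one-liner that casts the whole VAE to float32. A minimal caller-side sketch of the migration; the checkpoint name is illustrative, and any fp16 SDXL pipeline behaves the same:

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)

# Before this commit, pipe.upcast_vae() cast the VAE to float32 and then, when
# torch 2.0 / xFormers attention processors were detected, moved
# post_quant_conv, decoder.conv_in, and decoder.mid_block back to the original
# dtype to save memory.
# Now upcast_vae() is just a deprecated alias for the direct cast below:
pipe.vae.to(torch.float32)
```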
main/mixture_tiling_sdxl.py CHANGED
@@ -30,17 +30,13 @@ from diffusers.loaders import (
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    FusedAttnProcessor2_0,
-    XFormersAttnProcessor,
-)
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
 from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler
 from diffusers.utils import (
     USE_PEFT_BACKEND,
+    deprecate,
     is_invisible_watermark_available,
     is_torch_xla_available,
     logging,
@@ -710,22 +706,8 @@ class StableDiffusionXLTilingPipeline(
         return torch.tile(weights_torch, (nbatches, self.unet.config.in_channels, 1, 1))
 
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(
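The new `upcast_vae` bodies route through `deprecate` from `diffusers.utils`, called with the deprecated name, the removal version, and a user-facing message. A small sketch of how the warning can be observed; the FutureWarning-until-removal semantics (warn while the installed diffusers version is below "1.0.0", raise afterwards) are my assumption about the helper, not something this diff shows:

```python
import warnings

from diffusers.utils import deprecate

# Capture the warning the new upcast_vae body emits (assumed FutureWarning
# semantics; the call is assumed to raise instead once diffusers itself
# reaches version 1.0.0).
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")

assert any("upcast_vae" in str(w.message) for w in caught)
```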
main/mod_controlnet_tile_sr_sdxl.py CHANGED
@@ -39,16 +39,13 @@ from diffusers.models import (
     MultiControlNetModel,
     UNet2DConditionModel,
 )
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    XFormersAttnProcessor,
-)
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
 from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler
 from diffusers.utils import (
     USE_PEFT_BACKEND,
+    deprecate,
     logging,
     replace_example_docstring,
     scale_lora_layers,
@@ -1220,23 +1217,9 @@ class StableDiffusionXLControlNetTileSRPipeline(
 
         return tile_weights, tile_row_overlaps, tile_col_overlaps
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @property
     def guidance_scale(self):
main/pipeline_controlnet_xl_kolors.py CHANGED
@@ -40,10 +40,6 @@ from diffusers.models import (
     MultiControlNetModel,
     UNet2DConditionModel,
 )
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    XFormersAttnProcessor,
-)
 from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
@@ -760,21 +756,8 @@ class KolorsControlNetPipeline(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @property
     def guidance_scale(self):
main/pipeline_controlnet_xl_kolors_img2img.py CHANGED
@@ -40,10 +40,6 @@ from diffusers.models import (
     MultiControlNetModel,
     UNet2DConditionModel,
 )
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    XFormersAttnProcessor,
-)
 from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
@@ -930,21 +926,8 @@ class KolorsControlNetImg2ImgPipeline(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @property
     def guidance_scale(self):
main/pipeline_controlnet_xl_kolors_inpaint.py CHANGED
@@ -39,10 +39,6 @@ from diffusers.models import (
     MultiControlNetModel,
     UNet2DConditionModel,
 )
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    XFormersAttnProcessor,
-)
 from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
@@ -1006,21 +1002,8 @@ class KolorsControlNetInpaintPipeline(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @property
     def denoising_end(self):
main/pipeline_demofusion_sdxl.py CHANGED
@@ -16,11 +16,11 @@ from diffusers.loaders import (
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
+    deprecate,
     is_accelerate_available,
     is_accelerate_version,
     is_invisible_watermark_available,
@@ -612,20 +612,9 @@ class DemoFusionSDXLPipeline(
 
         return image
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (AttnProcessor2_0, XFormersAttnProcessor),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     @torch.no_grad()
     @replace_example_docstring(EXAMPLE_DOC_STRING)
main/pipeline_faithdiff_stable_diffusion_xl.py CHANGED
@@ -40,13 +40,6 @@ from diffusers.loaders import (
     UNet2DConditionLoadersMixin,
 )
 from diffusers.models import AutoencoderKL
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    FusedAttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
-    XFormersAttnProcessor,
-)
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D, get_down_block
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
@@ -1642,24 +1635,8 @@ class FaithDiffStableDiffusionXLPipeline(
         return latents
 
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-                FusedAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(
main/pipeline_kolors_differential_img2img.py CHANGED
@@ -22,13 +22,12 @@ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
 from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
 from diffusers.loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin
 from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from diffusers.models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor
 from diffusers.pipelines.kolors.pipeline_output import KolorsPipelineOutput
 from diffusers.pipelines.kolors.text_encoder import ChatGLMModel
 from diffusers.pipelines.kolors.tokenizer import ChatGLMTokenizer
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.schedulers import KarrasDiffusionSchedulers
-from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
+from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring
 from diffusers.utils.torch_utils import randn_tensor
 
 
@@ -709,24 +708,9 @@ class KolorsDifferentialImg2ImgPipeline(
         add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
         return add_time_ids
 
-    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(
main/pipeline_kolors_inpainting.py CHANGED
@@ -32,12 +32,6 @@ from diffusers.loaders import (
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
-    XFormersAttnProcessor,
-)
 from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
@@ -1008,23 +1002,8 @@ class KolorsInpaintPipeline(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(
main/pipeline_sdxl_style_aligned.py CHANGED
@@ -45,8 +45,6 @@ from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
 from diffusers.models.attention_processor import (
     Attention,
     AttnProcessor2_0,
-    FusedAttnProcessor2_0,
-    XFormersAttnProcessor,
 )
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
@@ -1151,22 +1149,8 @@ class StyleAlignedSDXLPipeline(
         return add_time_ids
 
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     def _enable_shared_attention_processors(
         self,
main/pipeline_stable_diffusion_upscale_ldm3d.py CHANGED
@@ -503,24 +503,9 @@ class StableDiffusionUpscaleLDM3DPipeline(
         latents = latents * self.scheduler.init_noise_sigma
         return latents
 
-    # def upcast_vae(self):
-    #     dtype = self.vae.dtype
-    #     self.vae.to(dtype=torch.float32)
-    #     use_torch_2_0_or_xformers = isinstance(
-    #         self.vae.decoder.mid_block.attentions[0].processor,
-    #         (
-    #             AttnProcessor2_0,
-    #             XFormersAttnProcessor,
-    #             LoRAXFormersAttnProcessor,
-    #             LoRAAttnProcessor2_0,
-    #         ),
-    #     )
-    #     # if xformers or torch_2_0 is used attention block does not need
-    #     # to be in float32 which can save lots of memory
-    #     if use_torch_2_0_or_xformers:
-    #         self.vae.post_quant_conv.to(dtype)
-    #         self.vae.decoder.conv_in.to(dtype)
-    #         self.vae.decoder.mid_block.to(dtype)
+    def upcast_vae(self):
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
+        self.vae.to(dtype=torch.float32)
 
     @torch.no_grad()
     def __call__(
main/pipeline_stable_diffusion_xl_attentive_eraser.py CHANGED
@@ -35,12 +35,6 @@ from diffusers.loaders import (
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    LoRAAttnProcessor2_0,
-    LoRAXFormersAttnProcessor,
-    XFormersAttnProcessor,
-)
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
@@ -1282,23 +1276,8 @@ class StableDiffusionXL_AE_Pipeline(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                LoRAXFormersAttnProcessor,
-                LoRAAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
main/pipeline_stable_diffusion_xl_controlnet_adapter.py CHANGED
@@ -25,7 +25,6 @@ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
 from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
 from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
 from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel
-from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
@@ -34,6 +33,7 @@ from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
     PIL_INTERPOLATION,
     USE_PEFT_BACKEND,
+    deprecate,
     logging,
     replace_example_docstring,
     scale_lora_layers,
@@ -793,20 +793,9 @@ class StableDiffusionXLControlNetAdapterPipeline(
         add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
         return add_time_ids
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (AttnProcessor2_0, XFormersAttnProcessor),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
     def _default_height_width(self, height, width, image):
main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py CHANGED
@@ -43,7 +43,6 @@ from diffusers.models import (
     T2IAdapter,
     UNet2DConditionModel,
 )
-from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
 from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
@@ -52,6 +51,7 @@ from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
     PIL_INTERPOLATION,
     USE_PEFT_BACKEND,
+    deprecate,
     logging,
     replace_example_docstring,
     scale_lora_layers,
@@ -1130,20 +1130,9 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
 
         return add_time_ids, add_neg_time_ids
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (AttnProcessor2_0, XFormersAttnProcessor),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
     def _default_height_width(self, height, width, image):
main/pipeline_stable_diffusion_xl_differential_img2img.py CHANGED
@@ -35,10 +35,6 @@ from diffusers.loaders import (
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    XFormersAttnProcessor,
-)
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
 from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
@@ -848,21 +844,8 @@ class StableDiffusionXLDifferentialImg2ImgPipeline(
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(
main/pipeline_stable_diffusion_xl_ipex.py CHANGED
@@ -32,10 +32,6 @@ from diffusers.loaders import (
     TextualInversionLoaderMixin,
 )
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    XFormersAttnProcessor,
-)
 from diffusers.models.lora import adjust_lora_scale_text_encoder
 from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
 from diffusers.schedulers import KarrasDiffusionSchedulers
@@ -658,23 +654,9 @@ class StableDiffusionXLPipelineIpex(
         add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
         return add_time_ids
 
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
     def upcast_vae(self):
-        dtype = self.vae.dtype
+        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
         self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
 
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):