Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -45,18 +45,13 @@ from torchvision.transforms import InterpolationMode
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-# vae = AutoencoderKL.from_pretrained('.', subfolder='vae')
-# scheduler = DDIMScheduler.from_pretrained('.', subfolder='scheduler')
-# image_encoder = CLIPVisionModelWithProjection.from_pretrained('.', subfolder="image_encoder")
-# feature_extractor = CLIPImageProcessor.from_pretrained('.', subfolder="feature_extractor")
-
 stable_diffusion_repo_path = "stabilityai/stable-diffusion-2-1-unclip"
 vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder='vae')
 scheduler = DDIMScheduler.from_pretrained(stable_diffusion_repo_path, subfolder='scheduler')
 sd_image_variations_diffusers_path = 'lambdalabs/sd-image-variations-diffusers'
 image_encoder = CLIPVisionModelWithProjection.from_pretrained(sd_image_variations_diffusers_path, subfolder="image_encoder")
 feature_extractor = CLIPImageProcessor.from_pretrained(sd_image_variations_diffusers_path, subfolder="feature_extractor")
-unet = UNet2DConditionModel.from_pretrained('.', subfolder="
+unet = UNet2DConditionModel.from_pretrained('.', subfolder="unet")
 
 pipe = DepthNormalEstimationPipeline(vae=vae,
                                      image_encoder=image_encoder,
@@ -161,7 +156,7 @@ def run_demo():
             value=10,
         )
         ensemble_size = gr.Slider(
-            label="Ensemble size (
+            label="Ensemble size (4 will be enough. More steps, higher accuracy)",
             minimum=1,
             maximum=15,
             step=1,
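A note on the first hunk: after this commit, the shared Stable Diffusion components come from two public Hub repos, while only the fine-tuned UNet is loaded from the Space's own repo ('.'). The pattern is collected below as a standalone sketch, assuming diffusers and transformers are installed; the DepthNormalEstimationPipeline constructor is cut off after image_encoder= in the diff view, so the sketch stops at the component loads.

# Standalone sketch of the post-commit loading pattern: shared components
# from public Hub repos, the task-specific UNet from the local repo ('.').
# First run downloads several GB of weights.
import torch
from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

stable_diffusion_repo_path = "stabilityai/stable-diffusion-2-1-unclip"
vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder="vae")
scheduler = DDIMScheduler.from_pretrained(stable_diffusion_repo_path, subfolder="scheduler")

sd_image_variations_diffusers_path = "lambdalabs/sd-image-variations-diffusers"
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    sd_image_variations_diffusers_path, subfolder="image_encoder"
)
feature_extractor = CLIPImageProcessor.from_pretrained(
    sd_image_variations_diffusers_path, subfolder="feature_extractor"
)

# The fine-tuned UNet is the only component that must live in this repo;
# these components are then handed to the Space's DepthNormalEstimationPipeline.
unet = UNet2DConditionModel.from_pretrained(".", subfolder="unet")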
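A note on the second hunk's new label: "ensemble size" here is a test-time knob, not a training setting. Diffusion-based depth/normal estimators start each forward pass from fresh noise, so single predictions are stochastic; running several passes and aggregating per pixel trades runtime for stability, which is what "More steps, higher accuracy" alludes to. Below is a generic, hypothetical sketch of that aggregation; predict_once stands in for one sampling pass of the pipeline, whose internals are not shown in this diff.

# Generic sketch of test-time ensembling as the slider label describes it:
# sample the stochastic predictor `ensemble_size` times, reduce per pixel.
import numpy as np

def ensemble_predict(predict_once, ensemble_size=4):
    # Stack the independent HxW predictions and take a per-pixel median,
    # which is more robust to outlier samples than a mean.
    preds = np.stack([predict_once() for _ in range(ensemble_size)], axis=0)
    return np.median(preds, axis=0)

# Toy usage: Gaussian noise around a constant "depth" of 1.0 stands in
# for one sampling pass.
rng = np.random.default_rng(0)
estimate = ensemble_predict(lambda: rng.normal(1.0, 0.1, size=(4, 4)), ensemble_size=4)
print(estimate.round(3))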