Unload to CPU: VideoAutoencoderKL
Load to GPU: AutoencoderKL
Unload to CPU: AutoencoderKL
Load to GPU: CLIPTextModel
Unload to CPU: CLIPTextModel
Load to GPU: ModifiedUNet
Unload to CPU: ModifiedUNet
Load to GPU: AutoencoderKL
Unload to CPU: AutoencoderKL
Load to GPU: CLIPTextModel
Unload to CPU: CLIPTextModel
Load to GPU: Resampler
Load to GPU: ImprovedCLIPVisionModelWithProjection
Unload to CPU: Resampler
Unload to CPU: ImprovedCLIPVisionModelWithProjection
Load to GPU: VideoAutoencoderKL
Traceback (most recent call last):
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\queueing.py", line 528, in process_events
response = await route_utils.call_process_api(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\route_utils.py", line 270, in call_process_api
output = await app.get_blocks().process_api(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\blocks.py", line 1908, in process_api
result = await self.call_function(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\blocks.py", line 1485, in call_function
prediction = await anyio.to_thread.run_sync(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\anyio\_backends\_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\anyio\_backends\_asyncio.py", line 859, in run
result = context.run(func, *args)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\utils.py", line 808, in wrapper
response = f(*args, **kwargs)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\gradio_app.py", line 222, in process_video
frames, im1, im2 = process_video_inner(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\gradio_app.py", line 190, in process_video_inner
input_frame_latents, vae_hidden_states = video_pipe.encode_latents(input_frames, return_hidden_states=True)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\pipeline.py", line 102, in encode_latents
encoder_posterior, hidden_states = self.vae.encode(x, return_hidden_states=return_hidden_states)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 804, in encode
h, hidden = self.encoder(x, return_hidden_states)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 249, in forward
h = self.mid.attn_1(h)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 411, in forward
h = self.attention(h)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 397, in attention
out = chunked_attention(
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 36, in chunked_attention
out = xformers.ops.memory_efficient_attention(q, k, v)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\__init__.py", line 276, in memory_efficient_attention
return _memory_efficient_attention(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\__init__.py", line 395, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\__init__.py", line 414, in _memory_efficient_attention_forward
op = _dispatch_fw(inp, False)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\dispatch.py", line 119, in _dispatch_fw
return _run_priority_list(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\dispatch.py", line 55, in _run_priority_list
raise NotImplementedError(msg)
NotImplementedError: No operator found for memory_efficient_attention_forward
with inputs:
query : shape=(2, 2688, 1, 512) (torch.float16)
key : shape=(2, 2688, 1, 512) (torch.float16)
value : shape=(2, 2688, 1, 512) (torch.float16)
attn_bias : <class 'NoneType'>
p : 0.0
decoderF
is not supported because:
max(query.shape[-1] != value.shape[-1]) > 128
xFormers wasn't build with CUDA support
attn_bias type is <class 'NoneType'>
operator wasn't built - see python -m xformers.info
for more info
[email protected]
is not supported because:
max(query.shape[-1] != value.shape[-1]) > 256
xFormers wasn't build with CUDA support
operator wasn't built - see python -m xformers.info
for more info
cutlassF
is not supported because:
xFormers wasn't build with CUDA support
operator wasn't built - see python -m xformers.info
for more info
smallkF
is not supported because:
max(query.shape[-1] != value.shape[-1]) > 32
xFormers wasn't build with CUDA support
dtype=torch.float16 (supported: {torch.float32})
operator wasn't built - see python -m xformers.info
for more info
unsupported embed per head: 512
Unload to CPU: VideoAutoencoderKL
Load to GPU: CLIPTextModel
Unload to CPU: CLIPTextModel
Load to GPU: Resampler
Load to GPU: ImprovedCLIPVisionModelWithProjection
Unload to CPU: Resampler
Unload to CPU: ImprovedCLIPVisionModelWithProjection
Load to GPU: VideoAutoencoderKL
Traceback (most recent call last):
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\queueing.py", line 528, in process_events
response = await route_utils.call_process_api(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\route_utils.py", line 270, in call_process_api
output = await app.get_blocks().process_api(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\blocks.py", line 1908, in process_api
result = await self.call_function(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\blocks.py", line 1485, in call_function
prediction = await anyio.to_thread.run_sync(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\anyio\_backends\_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\anyio\_backends\_asyncio.py", line 859, in run
result = context.run(func, *args)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\gradio\utils.py", line 808, in wrapper
response = f(*args, **kwargs)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\gradio_app.py", line 222, in process_video
frames, im1, im2 = process_video_inner(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\gradio_app.py", line 190, in process_video_inner
input_frame_latents, vae_hidden_states = video_pipe.encode_latents(input_frames, return_hidden_states=True)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\pipeline.py", line 102, in encode_latents
encoder_posterior, hidden_states = self.vae.encode(x, return_hidden_states=return_hidden_states)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 804, in encode
h, hidden = self.encoder(x, return_hidden_states)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 249, in forward
h = self.mid.attn_1(h)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 411, in forward
h = self.attention(h)
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 397, in attention
out = chunked_attention(
File "E:\UNDO\Paints-UNDO\diffusers_vdm\vae.py", line 36, in chunked_attention
out = xformers.ops.memory_efficient_attention(q, k, v)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\__init__.py", line 276, in memory_efficient_attention
return _memory_efficient_attention(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\__init__.py", line 395, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\__init__.py", line 414, in _memory_efficient_attention_forward
op = _dispatch_fw(inp, False)
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\dispatch.py", line 119, in _dispatch_fw
return _run_priority_list(
File "C:\Users\DM.conda\envs\paints_undo\lib\site-packages\xformers\ops\fmha\dispatch.py", line 55, in _run_priority_list
raise NotImplementedError(msg)
NotImplementedError: No operator found for memory_efficient_attention_forward
with inputs:
query : shape=(2, 2688, 1, 512) (torch.float16)
key : shape=(2, 2688, 1, 512) (torch.float16)
value : shape=(2, 2688, 1, 512) (torch.float16)
attn_bias : <class 'NoneType'>
p : 0.0
decoderF
is not supported because:
max(query.shape[-1] != value.shape[-1]) > 128
xFormers wasn't build with CUDA support
attn_bias type is <class 'NoneType'>
operator wasn't built - see python -m xformers.info
for more info
[email protected]
is not supported because:
max(query.shape[-1] != value.shape[-1]) > 256
xFormers wasn't build with CUDA support
operator wasn't built - see python -m xformers.info
for more info
cutlassF
is not supported because:
xFormers wasn't build with CUDA support
operator wasn't built - see python -m xformers.info
for more info
smallkF
is not supported because:
max(query.shape[-1] != value.shape[-1]) > 32
xFormers wasn't build with CUDA support
dtype=torch.float16 (supported: {torch.float32})
operator wasn't built - see python -m xformers.info
for more info
unsupported embed per head: 512
13th Gen Intel(R) Core(TM) i7-13700K 3.40 GHz
RTX 4090