Posted on 2024-3-29 22:15:41
Could any of the experts here tell me what's causing this? It popped up after I generated three images, and now it won't work at all.
Error occurred when executing KSampler:
Query/Key/Value should either all have the same dtype, or (in the quantized case) Key/Value should have dtype torch.int32
query.dtype: torch.float16
key.dtype : torch.float32
value.dtype: torch.float32
File "D:\ComfyUI\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "D:\ComfyUI\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "D:\ComfyUI\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "D:\ComfyUI\nodes.py", line 1369, in sample
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
File "D:\ComfyUI\nodes.py", line 1339, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "D:\ComfyUI\custom_nodes\Comfyui-StableSR\nodes.py", line 75, in hook_sample
return original_sample(*args, **kwargs)
File "D:\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 9, in informative_sample
return original_sample(*args, **kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations.
File "D:\ComfyUI\comfy\sample.py", line 100, in sample
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\ComfyUI\comfy\samplers.py", line 705, in sample
return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\ComfyUI\comfy\samplers.py", line 610, in sample
samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
File "D:\ComfyUI\comfy\samplers.py", line 548, in sample
samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
File "D:\ComfyUI\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ComfyUI\comfy\k_diffusion\sampling.py", line 154, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI\comfy\samplers.py", line 286, in forward
out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, model_options=model_options, seed=seed)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI\comfy\samplers.py", line 273, in forward
return self.apply_model(*args, **kwargs)
File "D:\ComfyUI\comfy\samplers.py", line 270, in apply_model
out = sampling_function(self.inner_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed)
File "D:\ComfyUI\comfy\samplers.py", line 250, in sampling_function
cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options)
File "D:\ComfyUI\comfy\samplers.py", line 224, in calc_cond_uncond_batch
output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks)
File "D:\ComfyUI\comfy\model_base.py", line 96, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 850, in forward
h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
File "D:\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 44, in forward_timestep_embed
x = layer(x, context, transformer_options)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI\comfy\ldm\modules\attention.py", line 633, in forward
x = block(x, context=context[i], transformer_options=transformer_options)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ComfyUI\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ComfyUI\comfy\ldm\modules\attention.py", line 460, in forward
return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
File "D:\ComfyUI\comfy\ldm\modules\diffusionmodules\util.py", line 191, in checkpoint
return func(*inputs)
File "D:\ComfyUI\comfy\ldm\modules\attention.py", line 557, in _forward
n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
File "D:\ComfyUI\custom_nodes\ComfyUI_InstantID\CrossAttentionPatch.py", line 124, in __call__
out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"])
File "D:\ComfyUI\comfy\ldm\modules\attention.py", line 327, in attention_xformers
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)
File "D:\ComfyUI\python\lib\site-packages\xformers\ops\fmha\__init__.py", line 223, in memory_efficient_attention
return _memory_efficient_attention(
File "D:\ComfyUI\python\lib\site-packages\xformers\ops\fmha\__init__.py", line 321, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "D:\ComfyUI\python\lib\site-packages\xformers\ops\fmha\__init__.py", line 334, in _memory_efficient_attention_forward
inp.validate_inputs()
File "D:\ComfyUI\python\lib\site-packages\xformers\ops\fmha\common.py", line 121, in validate_inputs
raise ValueError(
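
From the error text alone (my own reading, not an official fix): xformers' memory_efficient_attention requires query, key and value to share one dtype, and the traceback shows the InstantID cross-attention patch handing it a float16 query with float32 key/value. Below is a minimal, hypothetical sketch of that mismatch and the kind of cast the check asks for; the tensor shapes are invented for illustration and this is not the actual ComfyUI/InstantID code.

import torch
import xformers.ops

# Hypothetical tensors mirroring the mismatch in the traceback:
# query in float16, key/value in float32 (shapes are made up).
q = torch.randn(1, 77, 8, 64, dtype=torch.float16, device="cuda")
k = torch.randn(1, 77, 8, 64, dtype=torch.float32, device="cuda")
v = torch.randn(1, 77, 8, 64, dtype=torch.float32, device="cuda")

# Calling xformers.ops.memory_efficient_attention(q, k, v) here would raise
# the same ValueError, because validate_inputs() rejects mixed dtypes.

# Casting key/value to the query's dtype satisfies the check.
k = k.to(q.dtype)
v = v.to(q.dtype)
out = xformers.ops.memory_efficient_attention(q, k, v)
print(out.dtype)  # torch.float16

Presumably the real fix is updating the InstantID node (or having its patch cast ip_k/ip_v to q.dtype before the attention call); the sketch above only illustrates the dtype rule the error is enforcing.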