You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session. You switched accounts on another tab or window. Reload to refresh your session. Dismiss alert
File "/Users/weiwei/ComfyUI/execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "/Users/weiwei/ComfyUI/execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "/Users/weiwei/ComfyUI/execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "/Users/weiwei/ComfyUI/custom_nodes/ComfyUI_omost/omost_nodes.py", line 99, in load_llm
llm_model = AutoModelForCausalLM.from_pretrained(
File "/Users/weiwei/Envs/comfyui/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 563, in from_pretrained
return model_class.from_pretrained(
File "/Users/weiwei/Envs/comfyui/lib/python3.10/site-packages/transformers/modeling_utils.py", line 3202, in from_pretrained
hf_quantizer.validate_environment(
File "/Users/weiwei/Envs/comfyui/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py", line 62, in validate_environment
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
The text was updated successfully, but these errors were encountered:
Error occurred when executing OmostLLMLoaderNode:
No GPU found. A GPU is needed for quantization.
File "/Users/weiwei/ComfyUI/execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "/Users/weiwei/ComfyUI/execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "/Users/weiwei/ComfyUI/execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "/Users/weiwei/ComfyUI/custom_nodes/ComfyUI_omost/omost_nodes.py", line 99, in load_llm
llm_model = AutoModelForCausalLM.from_pretrained(
File "/Users/weiwei/Envs/comfyui/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 563, in from_pretrained
return model_class.from_pretrained(
File "/Users/weiwei/Envs/comfyui/lib/python3.10/site-packages/transformers/modeling_utils.py", line 3202, in from_pretrained
hf_quantizer.validate_environment(
File "/Users/weiwei/Envs/comfyui/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py", line 62, in validate_environment
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
The text was updated successfully, but these errors were encountered: