You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Beforehand, I'm sorry for my incompetence; I have never used GitHub or an AI before.
The "Loading model tensors" progress bar stays at 0%.
I tried reinstalling and redownloading the model, but that didn't work.
here is the rest of the log:
Runtime launching in B: drive mode
INIT | Starting | Flask
INIT | OK | Flask
INIT | Starting | Webserver
INIT | Starting | LUA bridge
INIT | OK | LUA bridge
INIT | Starting | LUA Scripts
INIT | OK | LUA Scripts
INIT | OK | Webserver
MESSAGE | Webserver started! You may now connect with a browser at http://127.0.0.1:5000
INFO | __main__:do_connect:3544 - Client connected!
INIT | Searching | GPU support
INIT | Found | GPU support
INIT | Starting | Transformers
INIT | Info | Final device configuration:
DEVICE ID | LAYERS | DEVICE NAME
0 | 14 | NVIDIA GeForce RTX 3060 Laptop GPU
N/A | 0 | (Disk cache)
N/A | 14 | (CPU)
INIT | Loading model tensors: 0%| | 0/341 [02:08<?, ?it/s]
You are using a model of type gptj to instantiate a model of type gpt_neo. This is not supported for all configurations of models and can yield errors.
INFO | __main__:do_connect:3544 - Client connected!
INIT | Loading model tensors: 0%| | 0/341 [00:00<?, ?it/s]Exception in thread Thread-14:
Traceback (most recent call last):
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 399, in load_state_dict
return torch.load(checkpoint_file, map_location="cpu")
File "C:\KoboldAI\torch_lazy_loader.py", line 295, in torch_load
callback(retval, f=f, map_location=map_location, pickle_module=pickle_module, **pickle_load_args)
File "aiserver.py", line 2401, in lazy_load_callback
f = z.open(f"archive/data/{storage_key}")
File "B:\python\lib\zipfile.py", line 1514, in open
zinfo = self.getinfo(name)
File "B:\python\lib\zipfile.py", line 1441, in getinfo
raise KeyError(
KeyError: "There is no item named 'archive/data/0' in the archive"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 403, in load_state_dict
if f.read().startswith("version"):
File "B:\python\lib\encodings\cp1250.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x81 in position 1774: character maps to <undefined>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "aiserver.py", line 2604, in load_model
model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem)
File "B:\python\lib\site-packages\transformers\models\auto\auto_factory.py", line 463, in from_pretrained
return model_class.from_pretrained(
File "aiserver.py", line 1822, in new_from_pretrained
return old_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 2326, in from_pretrained
model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 2536, in _load_pretrained_model
state_dict = load_state_dict(shard_file)
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 415, in load_state_dict
raise OSError(
OSError: Unable to load weights from pytorch checkpoint file for 'cache\models--PygmalionAI--pygmalion-6b\snapshots\2a0d74449c8fbf0378194e95f64aa92e16297294\pytorch_model-00001-of-00002.bin' at 'cache\models--PygmalionAI--pygmalion-6b\snapshots\2a0d74449c8fbf0378194e95f64aa92e16297294\pytorch_model-00001-of-00002.bin'. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 399, in load_state_dict
return torch.load(checkpoint_file, map_location="cpu")
File "C:\KoboldAI\torch_lazy_loader.py", line 295, in torch_load
callback(retval, f=f, map_location=map_location, pickle_module=pickle_module, **pickle_load_args)
File "aiserver.py", line 2401, in lazy_load_callback
f = z.open(f"archive/data/{storage_key}")
File "B:\python\lib\zipfile.py", line 1514, in open
zinfo = self.getinfo(name)
File "B:\python\lib\zipfile.py", line 1441, in getinfo
raise KeyError(
KeyError: "There is no item named 'archive/data/0' in the archive"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "B:\python\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "B:\python\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "B:\python\lib\site-packages\socketio\server.py", line 731, in _handle_event_internal
r = server._trigger_event(data[0], namespace, sid, *data[1:])
File "B:\python\lib\site-packages\socketio\server.py", line 756, in trigger_event
return self.handlers[namespace][event](*args)
File "B:\python\lib\site-packages\flask_socketio\__init__.py", line 282, in _handler
return self.handle_event(handler, message, namespace, sid,
File "B:\python\lib\site-packages\flask_socketio\__init__.py", line 826, in _handle_event
ret = handler(*args)
File "aiserver.py", line 466, in g
return f(*a, **k)
File "aiserver.py", line 3917, in get_message
load_model(use_gpu=msg['use_gpu'], gpu_layers=msg['gpu_layers'], disk_layers=msg['disk_layers'], online_model=msg['online_model'])
File "aiserver.py", line 2608, in load_model
model = GPTNeoForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem)
File "aiserver.py", line 1822, in new_from_pretrained
return old_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 2326, in from_pretrained
model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 2536, in _load_pretrained_model
state_dict = load_state_dict(shard_file)
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 403, in load_state_dict
if f.read().startswith("version"):
File "B:\python\lib\encodings\cp1250.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
MemoryError
INIT | Closed | Webserver
INIT | Loading model tensors: 0%| | 0/341 [01:45<?, ?it/s]
Terminate batch job (Y/N)?
The text was updated successfully, but these errors were encountered:
Beforehand, I'm sorry for my incompetence; I have never used GitHub or an AI before.
The "Loading model tensors" progress bar stays at 0%.
I tried reinstalling and redownloading the model, but that didn't work.
here is the rest of the log:
Runtime launching in B: drive mode
INIT | Starting | Flask
INIT | OK | Flask
INIT | Starting | Webserver
INIT | Starting | LUA bridge
INIT | OK | LUA bridge
INIT | Starting | LUA Scripts
INIT | OK | LUA Scripts
INIT | OK | Webserver
MESSAGE | Webserver started! You may now connect with a browser at http://127.0.0.1:5000
INFO | __main__:do_connect:3544 - Client connected!
INIT | Searching | GPU support
INIT | Found | GPU support
INIT | Starting | Transformers
INIT | Info | Final device configuration:
DEVICE ID | LAYERS | DEVICE NAME
0 | 14 | NVIDIA GeForce RTX 3060 Laptop GPU
N/A | 0 | (Disk cache)
N/A | 14 | (CPU)
INIT | Loading model tensors: 0%| | 0/341 [02:08<?, ?it/s]
You are using a model of type gptj to instantiate a model of type gpt_neo. This is not supported for all configurations of models and can yield errors.
INFO | __main__:do_connect:3544 - Client connected!
INIT | Loading model tensors: 0%| | 0/341 [00:00<?, ?it/s]Exception in thread Thread-14:
Traceback (most recent call last):
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 399, in load_state_dict
return torch.load(checkpoint_file, map_location="cpu")
File "C:\KoboldAI\torch_lazy_loader.py", line 295, in torch_load
callback(retval, f=f, map_location=map_location, pickle_module=pickle_module, **pickle_load_args)
File "aiserver.py", line 2401, in lazy_load_callback
f = z.open(f"archive/data/{storage_key}")
File "B:\python\lib\zipfile.py", line 1514, in open
zinfo = self.getinfo(name)
File "B:\python\lib\zipfile.py", line 1441, in getinfo
raise KeyError(
KeyError: "There is no item named 'archive/data/0' in the archive"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 403, in load_state_dict
if f.read().startswith("version"):
File "B:\python\lib\encodings\cp1250.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x81 in position 1774: character maps to <undefined>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "aiserver.py", line 2604, in load_model
model = AutoModelForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem)
File "B:\python\lib\site-packages\transformers\models\auto\auto_factory.py", line 463, in from_pretrained
return model_class.from_pretrained(
File "aiserver.py", line 1822, in new_from_pretrained
return old_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 2326, in from_pretrained
model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 2536, in _load_pretrained_model
state_dict = load_state_dict(shard_file)
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 415, in load_state_dict
raise OSError(
OSError: Unable to load weights from pytorch checkpoint file for 'cache\models--PygmalionAI--pygmalion-6b\snapshots\2a0d74449c8fbf0378194e95f64aa92e16297294\pytorch_model-00001-of-00002.bin' at 'cache\models--PygmalionAI--pygmalion-6b\snapshots\2a0d74449c8fbf0378194e95f64aa92e16297294\pytorch_model-00001-of-00002.bin'. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 399, in load_state_dict
return torch.load(checkpoint_file, map_location="cpu")
File "C:\KoboldAI\torch_lazy_loader.py", line 295, in torch_load
callback(retval, f=f, map_location=map_location, pickle_module=pickle_module, **pickle_load_args)
File "aiserver.py", line 2401, in lazy_load_callback
f = z.open(f"archive/data/{storage_key}")
File "B:\python\lib\zipfile.py", line 1514, in open
zinfo = self.getinfo(name)
File "B:\python\lib\zipfile.py", line 1441, in getinfo
raise KeyError(
KeyError: "There is no item named 'archive/data/0' in the archive"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "B:\python\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "B:\python\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "B:\python\lib\site-packages\socketio\server.py", line 731, in _handle_event_internal
r = server._trigger_event(data[0], namespace, sid, *data[1:])
File "B:\python\lib\site-packages\socketio\server.py", line 756, in trigger_event
return self.handlers[namespace][event](*args)
File "B:\python\lib\site-packages\flask_socketio\__init__.py", line 282, in _handler
return self.handle_event(handler, message, namespace, sid,
File "B:\python\lib\site-packages\flask_socketio\__init__.py", line 826, in _handle_event
ret = handler(*args)
File "aiserver.py", line 466, in g
return f(*a, **k)
File "aiserver.py", line 3917, in get_message
load_model(use_gpu=msg['use_gpu'], gpu_layers=msg['gpu_layers'], disk_layers=msg['disk_layers'], online_model=msg['online_model'])
File "aiserver.py", line 2608, in load_model
model = GPTNeoForCausalLM.from_pretrained(vars.model, revision=vars.revision, cache_dir="cache", **lowmem)
File "aiserver.py", line 1822, in new_from_pretrained
return old_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 2326, in from_pretrained
model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 2536, in _load_pretrained_model
state_dict = load_state_dict(shard_file)
File "B:\python\lib\site-packages\transformers\modeling_utils.py", line 403, in load_state_dict
if f.read().startswith("version"):
File "B:\python\lib\encodings\cp1250.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
MemoryError
INIT | Closed | Webserver
INIT | Loading model tensors: 0%| | 0/341 [01:45<?, ?it/s]
Terminate batch job (Y/N)?
The text was updated successfully, but these errors were encountered: