Hello, when I run the script runs/cls_train.py I hit the errors below. Can you help me solve them? Thanks very much!
Traceback (most recent call last):
File "", line 1, in
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Exception in thread Thread-2:
Traceback (most recent call last):
File "D:\Anaconda3\envs\tensorflow_gpu\lib\threading.py", line 916, in _bootstrap_inner
self.run()
File "D:\Anaconda3\envs\tensorflow_gpu\lib\threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "D:\Anaconda3\envs\tensorflow_gpu\lib\site-packages\keras\utils\data_utils.py", line 548, in _run
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
File "D:\Anaconda3\envs\tensorflow_gpu\lib\site-packages\keras\utils\data_utils.py", line 522, in
initargs=(seqs,))
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\context.py", line 119, in Pool
context=self.get_context())
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\pool.py", line 174, in init
self._repopulate_pool()
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\pool.py", line 239, in _repopulate_pool
w.start()
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\popen_spawn_win32.py", line 65, in init
reduction.dump(process_obj, to_child)
File "D:\Anaconda3\envs\tensorflow_gpu\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
OverflowError: cannot serialize a bytes object larger than 4 GiB
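For context on what the traceback shows: on Windows, Keras starts its multiprocessing workers with spawn, so the whole Sequence/generator (plus everything it references) has to be pickled and sent to each child process. If that object pickles to more than 4 GiB, for example because the dataset is held in memory on the object, the parent fails with the OverflowError above and the child then dies with the EOFError. Two things usually help: guard the training entry point with `if __name__ == "__main__":` (required for spawn on Windows), and/or pass `use_multiprocessing=False` so the enqueuer uses threads and nothing has to be pickled. Below is a minimal sketch, assuming the script calls `fit_generator`; the model, `SmallSequence`, and all hyper-parameters are placeholders, not the actual code from runs/cls_train.py.

```python
# A minimal, self-contained sketch of the workaround (Keras 2.x style).
# Everything here (SmallSequence, the toy model, the hyper-parameters) is
# illustrative only -- it is not taken from runs/cls_train.py.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import Sequence


class SmallSequence(Sequence):
    """Toy data source standing in for the real training Sequence."""

    def __init__(self, n_samples=256, batch_size=32):
        # Keep the per-object state small; anything stored here gets
        # pickled if use_multiprocessing is ever turned on.
        self.x = np.random.rand(n_samples, 10).astype("float32")
        self.y = np.random.randint(0, 2, size=(n_samples, 1))
        self.batch_size = batch_size

    def __len__(self):
        return len(self.x) // self.batch_size

    def __getitem__(self, idx):
        s = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[s], self.y[s]


if __name__ == "__main__":  # required on Windows whenever spawn-based workers are used
    model = Sequential([Dense(16, activation="relu", input_shape=(10,)),
                        Dense(1, activation="sigmoid")])
    model.compile(optimizer="adam", loss="binary_crossentropy")

    model.fit_generator(
        SmallSequence(),
        epochs=1,
        workers=4,                  # worker *threads* when multiprocessing is off
        use_multiprocessing=False,  # nothing is pickled, so the 4 GiB limit never triggers
    )
```

If you do need real multiprocessing, keep the Sequence lightweight (load samples from disk inside `__getitem__` instead of holding the whole array on the object) so the pickled payload stays well under 4 GiB.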