Components: spancat
Merging training and evaluation data for 1 components
- [spancat] Training: 132 | Evaluation: 33 (20% split)
Training: 132 | Evaluation: 33
Labels: spancat (5)
Pipeline: ['tok2vec', 'spancat']
Initial learn rate: 0.001
E    #       LOSS TOK2VEC  LOSS SPANCAT  SPANS_SC_F  SPANS_SC_P  SPANS_SC_R  SCORE
---  ------  ------------  ------------  ----------  ----------  ----------  ------
Aborting and saving the final best model. Encountered exception:
OutOfMemoryError('Out of memory allocating 1,876,703,232 bytes (allocated so far: 6,663,892,992 bytes).')
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/harsh/.local/lib/python3.8/site-packages/prodigy/__main__.py", line 61, in <module>
    controller = recipe(*args, use_plac=True)
  File "cython_src/prodigy/core.pyx", line 329, in prodigy.core.recipe.recipe_decorator.recipe_proxy
  File "/home/harsh/.local/lib/python3.8/site-packages/plac_core.py", line 367, in call
    cmd, result = parser.consume(arglist)
  File "/home/harsh/.local/lib/python3.8/site-packages/plac_core.py", line 232, in consume
    return cmd, self.func(*(args + varargs + extraopts), **kwargs)
  File "/home/harsh/.local/lib/python3.8/site-packages/prodigy/recipes/train.py", line 278, in train
    return _train(
  File "/home/harsh/.local/lib/python3.8/site-packages/prodigy/recipes/train.py", line 198, in _train
    spacy_train(nlp, output_path, use_gpu=gpu_id, stdout=stdout)
  File "/home/harsh/.local/lib/python3.8/site-packages/spacy/training/loop.py", line 122, in train
    raise e
  File "/home/harsh/.local/lib/python3.8/site-packages/spacy/training/loop.py", line 105, in train
    for batch, info, is_best_checkpoint in training_step_iterator:
  File "/home/harsh/.local/lib/python3.8/site-packages/spacy/training/loop.py", line 203, in train_while_improving
    nlp.update(
  File "/home/harsh/.local/lib/python3.8/site-packages/spacy/language.py", line 1164, in update
    proc.update(examples, sgd=None, losses=losses, **component_cfg[name])  # type: ignore
  File "/home/harsh/.local/lib/python3.8/site-packages/spacy/pipeline/spancat.py", line 346, in update
    backprop_scores(d_scores)  # type: ignore
  File "/home/harsh/.local/lib/python3.8/site-packages/thinc/layers/chain.py", line 60, in backprop
    dX = callback(dY)
  File "/home/harsh/.local/lib/python3.8/site-packages/thinc/layers/chain.py", line 60, in backprop
    dX = callback(dY)
  File "/home/harsh/.local/lib/python3.8/site-packages/thinc/layers/concatenate.py", line 67, in backprop
    gradient = bwd(dY)
  File "/home/harsh/.local/lib/python3.8/site-packages/thinc/layers/reduce_mean.py", line 26, in backprop
    return Ragged(model.ops.backprop_reduce_mean(dY, lengths), lengths)
  File "/home/harsh/.local/lib/python3.8/site-packages/thinc/backends/cupy_ops.py", line 235, in backprop_reduce_mean
    return _custom_kernels.backprop_reduce_mean(d_means, lengths)
  File "/home/harsh/.local/lib/python3.8/site-packages/thinc/backends/_custom_kernels.py", line 318, in backprop_reduce_mean
    out = cupy.zeros((T, O), dtype="f")
  File "/home/harsh/.local/lib/python3.8/site-packages/cupy/_creation/basic.py", line 211, in zeros
    a = cupy.ndarray(shape, dtype, order=order)
  File "cupy/_core/core.pyx", line 171, in cupy._core.core.ndarray.__init__
  File "cupy/cuda/memory.pyx", line 698, in cupy.cuda.memory.alloc
  File "cupy/cuda/memory.pyx", line 1375, in cupy.cuda.memory.MemoryPool.malloc
  File "cupy/cuda/memory.pyx", line 1396, in cupy.cuda.memory.MemoryPool.malloc
  File "cupy/cuda/memory.pyx", line 1076, in cupy.cuda.memory.SingleDeviceMemoryPool.malloc
  File "cupy/cuda/memory.pyx", line 1097, in cupy.cuda.memory.SingleDeviceMemoryPool._malloc
  File "cupy/cuda/memory.pyx", line 1335, in cupy.cuda.memory.SingleDeviceMemoryPool._try_malloc
cupy.cuda.memory.OutOfMemoryError: Out of memory allocating 1,876,703,232 bytes (allocated so far: 6,663,892,992 bytes).
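
For context on where the memory goes: the failing allocation is the (T, O) float buffer in thinc's backprop_reduce_mean, where T is roughly the total number of tokens across all candidate spans in the batch and O is the vector width, so batches with many or very long documents can spike past the GPU's capacity. Below is a minimal sketch of the usual first remedy, shrinking what each update has to allocate. It assumes a default spaCy v3 config exported for this pipeline; the file names and the value 500 are illustrative, not taken from the log.

# Sketch: edit the training config so each update allocates smaller buffers.
# Paths and values here are assumptions; tune them for your GPU.
from thinc.api import Config

config = Config().from_disk("config.cfg")        # hypothetical config path
config["corpora"]["train"]["max_length"] = 500   # split/skip very long training docs
config["training"]["batcher"]["size"] = 500      # fixed, smaller batch size
config.to_disk("config_smaller.cfg")

The adjusted file can then be passed back to training (Prodigy's train recipe accepts one via --config). If memory still runs out, narrowing the spancat suggester's n-gram sizes in the config reduces T directly, since fewer candidate spans are scored per document.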