Can we use a model trained on spacy==2.0.18 with spacy==2.1.3?

When I tried to load the custom spaCy model, it gave me an error.

nlp = spacy.load('/home/cloud/Spacy/Custom_model')

error                                     Traceback (most recent call last)
<ipython-input> in <module>
----> 1 nlp = spacy.load('/home/cloud/Spacy/Custom_model')

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/site-packages/spacy/__init__.py in load(name, **overrides)
     25     if depr_path not in (True, False, None):
     26         deprecation_warning(Warnings.W001.format(path=depr_path))
---> 27     return util.load_model(name, **overrides)
     28 
     29 

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/site-packages/spacy/util.py in load_model(name, **overrides)
    131             return load_model_from_package(name, **overrides)
    132         if Path(name).exists():  # path to model data directory
--> 133             return load_model_from_path(Path(name), **overrides)
    134     elif hasattr(name, "exists"):  # Path or Path-like to model data
    135         return load_model_from_path(name, **overrides)

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/site-packages/spacy/util.py in load_model_from_path(model_path, meta, **overrides)
    171             component = nlp.create_pipe(name, config=config)
    172             nlp.add_pipe(component, name=name)
--> 173     return nlp.from_disk(model_path)
    174 
    175 

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/site-packages/spacy/language.py in from_disk(self, path, exclude, disable)
    784             # Convert to list here in case exclude is (default) tuple
    785             exclude = list(exclude) + ["vocab"]
--> 786         util.from_disk(path, deserializers, exclude)
    787         self._path = path
    788         return self

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/site-packages/spacy/util.py in from_disk(path, readers, exclude)
    609         # Split to support file names like meta.json
    610         if key.split(".")[0] not in exclude:
--> 611             reader(path / key)
    612     return path
    613 

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/site-packages/spacy/language.py in <lambda>(p)
    774         deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
    775         deserializers["vocab"] = lambda p: self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self)
--> 776         deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(p, exclude=["vocab"])
    777         for name, proc in self.pipeline:
    778             if name in exclude:

tokenizer.pyx in spacy.tokenizer.Tokenizer.from_disk()

tokenizer.pyx in spacy.tokenizer.Tokenizer.from_bytes()

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/re.py in compile(pattern, flags)
    231 def compile(pattern, flags=0):
    232     "Compile a regular expression pattern, returning a pattern object."
--> 233     return _compile(pattern, flags)
    234 
    235 def purge():

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/re.py in _compile(pattern, flags)
    299     if not sre_compile.isstring(pattern):
    300         raise TypeError("first argument must be string or compiled pattern")
--> 301     p = sre_compile.compile(pattern, flags)
    302     if not (flags & DEBUG):
    303         if len(_cache) >= _MAXCACHE:

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/sre_compile.py in compile(p, flags)
    560     if isstring(p):
    561         pattern = p
--> 562         p = sre_parse.parse(p, flags)
    563     else:
    564         pattern = None

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/sre_parse.py in parse(str, flags, pattern)
    853 
    854     try:
--> 855         p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, 0)
    856     except Verbose:
    857         # the VERBOSE flag was switched on inside the pattern.  to be

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/sre_parse.py in _parse_sub(source, state, verbose, nested)
    414     while True:
    415         itemsappend(_parse(source, state, verbose, nested + 1,
--> 416                            not nested and not items))
    417         if not sourcematch("|"):
    418             break

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/sre_parse.py in _parse(source, state, verbose, nested, first)
    525                     break
    526                 elif this[0] == "\\":
--> 527                     code1 = _class_escape(source, this)
    528                 else:
    529                     code1 = LITERAL, _ord(this)

~/miniconda3/envs/tensorflow_gpu/lib/python3.6/sre_parse.py in _class_escape(source, escape)
    334         if len(escape) == 2:
    335             if c in ASCIILETTERS:
--> 336                 raise source.error('bad escape %s' % escape, len(escape))
    337             return LITERAL, ord(escape[1])
    338     except ValueError:

error: bad escape \p at position 257
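
For context on that final error: spaCy 2.0 depended on the regex package, which supports Unicode property escapes like \p{L} in tokenizer patterns, while spaCy 2.1 dropped that dependency and compiles the serialized patterns with the stdlib re module, which rejects them — most likely what fails here when the 2.0 model's tokenizer is deserialized. A minimal reproduction of the same failure:

import re

# spaCy 2.0 serialized tokenizer patterns written for the `regex` package,
# which understands Unicode property escapes like \p{L}; the stdlib `re`
# module used by spaCy 2.1 rejects them with exactly this error:
re.compile(r"[\p{L}]")  # error: bad escape \p at position 1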

No – as you can see in the release notes, the new spaCy version requires new models, which also means retraining your own custom models.
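
If you want to check which spaCy version a saved model was trained for, its meta.json records the compatible range. A minimal sketch, using the path from the traceback above:

import srsly  # bundled with spaCy 2.1

# Inspect the saved model's metadata without loading the full pipeline
meta = srsly.read_json("/home/cloud/Spacy/Custom_model/meta.json")
print(meta.get("spacy_version"))  # e.g. ">=2.0.0" for a model trained on v2.0
print(meta.get("version"))        # the model's own a.b.c version

For models installed as packages, running python -m spacy validate also lists which of them are compatible with the spaCy version you have installed.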

You can also find more details on the model versioning here in the docs:

Additionally, the model versioning reflects both the compatibility with spaCy, as well as the major and minor model version. A model version a.b.c translates to:

  • a: spaCy major version. For example, 2 for spaCy v2.x.
  • b: Model major version. Models with a different major version can’t be loaded by the same code. For example, changing the width of the model, adding hidden layers or changing the activation changes the model major version.
  • c: Model minor version. Same model structure, but different parameter values, e.g. from being trained on different data, for different numbers of iterations, etc.
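
Read concretely, a version string decomposes like this (an illustrative sketch, not spaCy API):

# Illustrative only: interpreting a model version string per the scheme above
model_version = "2.1.0"
a, b, c = model_version.split(".")
print("built for spaCy v%s.x" % a)           # a: spaCy major version
print("model architecture revision %s" % b)  # b: model major version
print("weights/training revision %s" % c)    # c: model minor version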

Hi @ines,

Thanks for the reply.

I am trying to use the EntityRuler, which was introduced in the new update. Here is what I tried:

import spacy
from spacy.pipeline import EntityRuler
nlp = spacy.load('custom_model')

weights_pattern = [
    {"LIKE_NUM": True},
    {"LOWER": {"IN": [ "pounds","green mountain diapers"]}}
]
patterns = [{"label": "DIAPER", "pattern": weights_pattern}]
ruler = EntityRuler(nlp, patterns=patterns)
nlp.add_pipe(ruler, before="ner")

doc = nlp("green mountain diapers have best wipes")
print([(ent.text, ent.label_) for ent in doc.ents])

[]

It is not able to find "green mountain diapers".
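
For what it's worth, each dict in a token pattern matches exactly one token, so a single {"LOWER": ...} attribute can never match the three-token phrase "green mountain diapers"; the pattern above also requires a LIKE_NUM token first, which this text doesn't contain. A minimal sketch of a pattern that should match, assuming en_core_web_sm as a stand-in for the custom model:

import spacy
from spacy.pipeline import EntityRuler

nlp = spacy.load("en_core_web_sm")  # hypothetical stand-in model

# One dict per token: a three-word phrase needs three dicts
patterns = [{
    "label": "DIAPER",
    "pattern": [{"LOWER": "green"}, {"LOWER": "mountain"}, {"LOWER": "diapers"}],
}]
ruler = EntityRuler(nlp)
ruler.add_patterns(patterns)
nlp.add_pipe(ruler, before="ner")

doc = nlp("green mountain diapers have best wipes")
print([(ent.text, ent.label_) for ent in doc.ents])
# expected: [('green mountain diapers', 'DIAPER')]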