# ===== fontTools/__init__.py =====

import logging
from fontTools.misc.loggingTools import configLogger

log = logging.getLogger(__name__)

version = __version__ = "4.51.0"

__all__ = ["version", "log", "configLogger"]


# ===== fontTools/__main__.py =====

import sys


def main(args=None):
    if args is None:
        args = sys.argv[1:]

    # TODO Handle library-wide options. Eg.:
    # --unicodedata
    # --verbose / other logging stuff

    # TODO Allow a way to run arbitrary modules? Useful for setting
    # library-wide options and calling another library. Eg.:
    #
    #   $ fonttools --unicodedata=... fontmake ...
    #
    # This allows for a git-like command where thirdparty commands
    # can be added. Should we just try importing the fonttools
    # module first and try without if it fails?

    if len(sys.argv) < 2:
        sys.argv.append("help")
    if sys.argv[1] == "-h" or sys.argv[1] == "--help":
        sys.argv[1] = "help"
    mod = "fontTools." + sys.argv[1]
    sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
    del sys.argv[0]

    import runpy

    runpy.run_module(mod, run_name="__main__")


if __name__ == "__main__":
    sys.exit(main())
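
# Dispatch sketch ("subset"/"MyFont.ttf" are just an illustrative invocation):
# running "fonttools subset MyFont.ttf" rewrites sys.argv and hands control to
# runpy.run_module("fontTools.subset", run_name="__main__"), so any fontTools
# submodule with a __main__ guard works as a subcommand; with no arguments or
# with -h/--help, the dispatcher falls back to the fontTools.help module.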
""" import re # every single line starts with a "word" identifierRE = re.compile(r"^([A-Za-z]+).*") # regular expression to parse char lines charRE = re.compile( r"(-?\d+)" # charnum r"\s*;\s*WX\s+" # ; WX r"(-?\d+)" # width r"\s*;\s*N\s+" # ; N r"([.A-Za-z0-9_]+)" # charname r"\s*;\s*B\s+" # ; B r"(-?\d+)" # left r"\s+" r"(-?\d+)" # bottom r"\s+" r"(-?\d+)" # right r"\s+" r"(-?\d+)" # top r"\s*;\s*" # ; ) # regular expression to parse kerning lines kernRE = re.compile( r"([.A-Za-z0-9_]+)" # leftchar r"\s+" r"([.A-Za-z0-9_]+)" # rightchar r"\s+" r"(-?\d+)" # value r"\s*" ) # regular expressions to parse composite info lines of the form: # Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; compositeRE = re.compile( r"([.A-Za-z0-9_]+)" # char name r"\s+" r"(\d+)" # number of parts r"\s*;\s*" ) componentRE = re.compile( r"PCC\s+" # PPC r"([.A-Za-z0-9_]+)" # base char name r"\s+" r"(-?\d+)" # x offset r"\s+" r"(-?\d+)" # y offset r"\s*;\s*" ) preferredAttributeOrder = [ "FontName", "FullName", "FamilyName", "Weight", "ItalicAngle", "IsFixedPitch", "FontBBox", "UnderlinePosition", "UnderlineThickness", "Version", "Notice", "EncodingScheme", "CapHeight", "XHeight", "Ascender", "Descender", ] class error(Exception): pass class AFM(object): _attrs = None _keywords = [ "StartFontMetrics", "EndFontMetrics", "StartCharMetrics", "EndCharMetrics", "StartKernData", "StartKernPairs", "EndKernPairs", "EndKernData", "StartComposites", "EndComposites", ] def __init__(self, path=None): """AFM file reader. Instantiating an object with a path name will cause the file to be opened, read, and parsed. Alternatively the path can be left unspecified, and a file can be parsed later with the :meth:`read` method.""" self._attrs = {} self._chars = {} self._kerning = {} self._index = {} self._comments = [] self._composites = {} if path is not None: self.read(path) def read(self, path): """Opens, reads and parses a file.""" lines = readlines(path) for line in lines: if not line.strip(): continue m = identifierRE.match(line) if m is None: raise error("syntax error in AFM file: " + repr(line)) pos = m.regs[1][1] word = line[:pos] rest = line[pos:].strip() if word in self._keywords: continue if word == "C": self.parsechar(rest) elif word == "KPX": self.parsekernpair(rest) elif word == "CC": self.parsecomposite(rest) else: self.parseattr(word, rest) def parsechar(self, rest): m = charRE.match(rest) if m is None: raise error("syntax error in AFM file: " + repr(rest)) things = [] for fr, to in m.regs[1:]: things.append(rest[fr:to]) charname = things[2] del things[2] charnum, width, l, b, r, t = (int(thing) for thing in things) self._chars[charname] = charnum, width, (l, b, r, t) def parsekernpair(self, rest): m = kernRE.match(rest) if m is None: raise error("syntax error in AFM file: " + repr(rest)) things = [] for fr, to in m.regs[1:]: things.append(rest[fr:to]) leftchar, rightchar, value = things value = int(value) self._kerning[(leftchar, rightchar)] = value def parseattr(self, word, rest): if word == "FontBBox": l, b, r, t = [int(thing) for thing in rest.split()] self._attrs[word] = l, b, r, t elif word == "Comment": self._comments.append(rest) else: try: value = int(rest) except (ValueError, OverflowError): self._attrs[word] = rest else: self._attrs[word] = value def parsecomposite(self, rest): m = compositeRE.match(rest) if m is None: raise error("syntax error in AFM file: " + repr(rest)) charname = m.group(1) ncomponents = int(m.group(2)) rest = rest[m.regs[0][1] :] components = [] while True: m = componentRE.match(rest) if m 

class AFM(object):
    _attrs = None

    _keywords = [
        "StartFontMetrics",
        "EndFontMetrics",
        "StartCharMetrics",
        "EndCharMetrics",
        "StartKernData",
        "StartKernPairs",
        "EndKernPairs",
        "EndKernData",
        "StartComposites",
        "EndComposites",
    ]

    def __init__(self, path=None):
        """AFM file reader.

        Instantiating an object with a path name will cause the file
        to be opened, read, and parsed. Alternatively the path can be
        left unspecified, and a file can be parsed later with the
        :meth:`read` method."""
        self._attrs = {}
        self._chars = {}
        self._kerning = {}
        self._index = {}
        self._comments = []
        self._composites = {}
        if path is not None:
            self.read(path)

    def read(self, path):
        """Opens, reads and parses a file."""
        lines = readlines(path)
        for line in lines:
            if not line.strip():
                continue
            m = identifierRE.match(line)
            if m is None:
                raise error("syntax error in AFM file: " + repr(line))

            pos = m.regs[1][1]
            word = line[:pos]
            rest = line[pos:].strip()
            if word in self._keywords:
                continue
            if word == "C":
                self.parsechar(rest)
            elif word == "KPX":
                self.parsekernpair(rest)
            elif word == "CC":
                self.parsecomposite(rest)
            else:
                self.parseattr(word, rest)

    def parsechar(self, rest):
        m = charRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        things = []
        for fr, to in m.regs[1:]:
            things.append(rest[fr:to])
        charname = things[2]
        del things[2]
        charnum, width, l, b, r, t = (int(thing) for thing in things)
        self._chars[charname] = charnum, width, (l, b, r, t)

    def parsekernpair(self, rest):
        m = kernRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        things = []
        for fr, to in m.regs[1:]:
            things.append(rest[fr:to])
        leftchar, rightchar, value = things
        value = int(value)
        self._kerning[(leftchar, rightchar)] = value

    def parseattr(self, word, rest):
        if word == "FontBBox":
            l, b, r, t = [int(thing) for thing in rest.split()]
            self._attrs[word] = l, b, r, t
        elif word == "Comment":
            self._comments.append(rest)
        else:
            try:
                value = int(rest)
            except (ValueError, OverflowError):
                self._attrs[word] = rest
            else:
                self._attrs[word] = value

    def parsecomposite(self, rest):
        m = compositeRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        charname = m.group(1)
        ncomponents = int(m.group(2))
        rest = rest[m.regs[0][1] :]
        components = []
        while True:
            m = componentRE.match(rest)
            if m is None:
                raise error("syntax error in AFM file: " + repr(rest))
            basechar = m.group(1)
            xoffset = int(m.group(2))
            yoffset = int(m.group(3))
            components.append((basechar, xoffset, yoffset))
            rest = rest[m.regs[0][1] :]
            if not rest:
                break
        assert len(components) == ncomponents
        self._composites[charname] = components

    def write(self, path, sep="\r"):
        """Writes out an AFM font to the given path."""
        import time

        lines = [
            "StartFontMetrics 2.0",
            "Comment Generated by afmLib; at %s"
            % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
        ]

        # write comments, assuming (possibly wrongly!) they should
        # all appear at the top
        for comment in self._comments:
            lines.append("Comment " + comment)

        # write attributes, first the ones we know about, in
        # a preferred order
        attrs = self._attrs
        for attr in preferredAttributeOrder:
            if attr in attrs:
                value = attrs[attr]
                if attr == "FontBBox":
                    value = "%s %s %s %s" % value
                lines.append(attr + " " + str(value))

        # then write the attributes we don't know about,
        # in alphabetical order
        items = sorted(attrs.items())
        for attr, value in items:
            if attr in preferredAttributeOrder:
                continue
            lines.append(attr + " " + str(value))

        # write char metrics
        lines.append("StartCharMetrics " + repr(len(self._chars)))
        items = [
            (charnum, (charname, width, box))
            for charname, (charnum, width, box) in self._chars.items()
        ]

        def myKey(a):
            """Custom key function to make sure unencoded chars (-1)
            end up at the end of the list after sorting."""
            if a[0] == -1:
                a = (0xFFFF,) + a[1:]  # 0xffff is an arbitrary large number
            return a

        items.sort(key=myKey)

        for charnum, (charname, width, (l, b, r, t)) in items:
            lines.append(
                "C %d ; WX %d ; N %s ; B %d %d %d %d ;"
                % (charnum, width, charname, l, b, r, t)
            )
        lines.append("EndCharMetrics")

        # write kerning info
        lines.append("StartKernData")
        lines.append("StartKernPairs " + repr(len(self._kerning)))
        items = sorted(self._kerning.items())
        for (leftchar, rightchar), value in items:
            lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
        lines.append("EndKernPairs")
        lines.append("EndKernData")

        if self._composites:
            composites = sorted(self._composites.items())
            lines.append("StartComposites %s" % len(self._composites))
            for charname, components in composites:
                line = "CC %s %s ;" % (charname, len(components))
                for basechar, xoffset, yoffset in components:
                    line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
                lines.append(line)
            lines.append("EndComposites")

        lines.append("EndFontMetrics")

        writelines(path, lines, sep)

    def has_kernpair(self, pair):
        """Returns `True` if the given glyph pair (specified as a tuple) exists
        in the kerning dictionary."""
        return pair in self._kerning

    def kernpairs(self):
        """Returns a list of all kern pairs in the kerning dictionary."""
        return list(self._kerning.keys())

    def has_char(self, char):
        """Returns `True` if the given glyph exists in the font."""
        return char in self._chars

    def chars(self):
        """Returns a list of all glyph names in the font."""
        return list(self._chars.keys())

    def comments(self):
        """Returns all comments from the file."""
        return self._comments

    def addComment(self, comment):
        """Adds a new comment to the file."""
        self._comments.append(comment)

    def addComposite(self, glyphName, components):
        """Specifies that the glyph `glyphName` is made up of the given
        components. The components list should be of the following form::

            [
                (glyphname, xOffset, yOffset),
                ...
            ]

        """
        self._composites[glyphName] = components

    def __getattr__(self, attr):
        if attr in self._attrs:
            return self._attrs[attr]
        else:
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        # all attrs *not* starting with "_" are considered to be AFM keywords
        if attr[:1] == "_":
            self.__dict__[attr] = value
        else:
            self._attrs[attr] = value

    def __delattr__(self, attr):
        # all attrs *not* starting with "_" are considered to be AFM keywords
        if attr[:1] == "_":
            try:
                del self.__dict__[attr]
            except KeyError:
                raise AttributeError(attr)
        else:
            try:
                del self._attrs[attr]
            except KeyError:
                raise AttributeError(attr)

    def __getitem__(self, key):
        if isinstance(key, tuple):
            # key is a tuple, return the kernpair
            return self._kerning[key]
        else:
            # return the metrics instead
            return self._chars[key]

    def __setitem__(self, key, value):
        if isinstance(key, tuple):
            # key is a tuple, set kernpair
            self._kerning[key] = value
        else:
            # set char metrics
            self._chars[key] = value

    def __delitem__(self, key):
        if isinstance(key, tuple):
            # key is a tuple, del kernpair
            del self._kerning[key]
        else:
            # del char metrics
            del self._chars[key]

    def __repr__(self):
        if hasattr(self, "FullName"):
            return "<AFM object for %s>" % self.FullName
        else:
            return "<AFM object at %x>" % id(self)
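
# Minimal round-trip sketch (the file names are hypothetical; the composite
# offsets are the ones shown in the compositeRE comment above):
#
#     afm = AFM("MyFont.afm")
#     afm[("V", "A")] = -80                # adjust a kern pair
#     charnum, width, bbox = afm["A"]      # per-glyph metrics
#     afm.addComposite("Aacute", [("A", 0, 0), ("acute", 182, 211)])
#     afm.write("MyFont-adjusted.afm")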
] """ self._composites[glyphName] = components def __getattr__(self, attr): if attr in self._attrs: return self._attrs[attr] else: raise AttributeError(attr) def __setattr__(self, attr, value): # all attrs *not* starting with "_" are consider to be AFM keywords if attr[:1] == "_": self.__dict__[attr] = value else: self._attrs[attr] = value def __delattr__(self, attr): # all attrs *not* starting with "_" are consider to be AFM keywords if attr[:1] == "_": try: del self.__dict__[attr] except KeyError: raise AttributeError(attr) else: try: del self._attrs[attr] except KeyError: raise AttributeError(attr) def __getitem__(self, key): if isinstance(key, tuple): # key is a tuple, return the kernpair return self._kerning[key] else: # return the metrics instead return self._chars[key] def __setitem__(self, key, value): if isinstance(key, tuple): # key is a tuple, set kernpair self._kerning[key] = value else: # set char metrics self._chars[key] = value def __delitem__(self, key): if isinstance(key, tuple): # key is a tuple, del kernpair del self._kerning[key] else: # del char metrics del self._chars[key] def __repr__(self): if hasattr(self, "FullName"): return "<AFM object for %s>" % self.FullName else: return "<AFM object at %x>" % id(self) def readlines(path): with open(path, "r", encoding="ascii") as f: data = f.read() return data.splitlines() def writelines(path, lines, sep="\r"): with open(path, "w", encoding="ascii", newline=sep) as f: f.write("\n".join(lines) + "\n") if __name__ == "__main__": import EasyDialogs path = EasyDialogs.AskFileForOpen() if path: afm = AFM(path) char = "A" if afm.has_char(char): print(afm[char]) # print charnum, width and boundingbox pair = ("A", "V") if afm.has_kernpair(pair): print(afm[pair]) # print kerning value for pair print(afm.Version) # various other afm entries have become attributes print(afm.Weight) # afm.comments() returns a list of all Comment lines found in the AFM print(afm.comments()) # print afm.chars() # print afm.kernpairs() print(afm) afm.write(path + ".muck") PKaZZZ�]�O�O�fontTools/agl.py# -*- coding: utf-8 -*- # The tables below are taken from # https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/glyphlist.txt # and # https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/aglfn.txt """ Interface to the Adobe Glyph List This module exists to convert glyph names from the Adobe Glyph List to their Unicode equivalents. Example usage: >>> from fontTools.agl import toUnicode >>> toUnicode("nahiragana") 'な' It also contains two dictionaries, ``UV2AGL`` and ``AGL2UV``, which map from Unicode codepoints to AGL names and vice versa: >>> import fontTools >>> fontTools.agl.UV2AGL[ord("?")] 'question' >>> fontTools.agl.AGL2UV["wcircumflex"] 373 This is used by fontTools when it has to construct glyph names for a font which doesn't include any (e.g. format 3.0 post tables). """ from fontTools.misc.textTools import tostr import re _aglText = """\ # ----------------------------------------------------------- # Copyright 2002-2019 Adobe (http://www.adobe.com/). # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the # following conditions are met: # # Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. 
# # Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # Neither the name of Adobe nor the names of its contributors # may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND # CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------- # Name: Adobe Glyph List # Table version: 2.0 # Date: September 20, 2002 # URL: https://github.com/adobe-type-tools/agl-aglfn # # Format: two semicolon-delimited fields: # (1) glyph name--upper/lowercase letters and digits # (2) Unicode scalar value--four uppercase hexadecimal digits # A;0041 AE;00C6 AEacute;01FC AEmacron;01E2 AEsmall;F7E6 Aacute;00C1 Aacutesmall;F7E1 Abreve;0102 Abreveacute;1EAE Abrevecyrillic;04D0 Abrevedotbelow;1EB6 Abrevegrave;1EB0 Abrevehookabove;1EB2 Abrevetilde;1EB4 Acaron;01CD Acircle;24B6 Acircumflex;00C2 Acircumflexacute;1EA4 Acircumflexdotbelow;1EAC Acircumflexgrave;1EA6 Acircumflexhookabove;1EA8 Acircumflexsmall;F7E2 Acircumflextilde;1EAA Acute;F6C9 Acutesmall;F7B4 Acyrillic;0410 Adblgrave;0200 Adieresis;00C4 Adieresiscyrillic;04D2 Adieresismacron;01DE Adieresissmall;F7E4 Adotbelow;1EA0 Adotmacron;01E0 Agrave;00C0 Agravesmall;F7E0 Ahookabove;1EA2 Aiecyrillic;04D4 Ainvertedbreve;0202 Alpha;0391 Alphatonos;0386 Amacron;0100 Amonospace;FF21 Aogonek;0104 Aring;00C5 Aringacute;01FA Aringbelow;1E00 Aringsmall;F7E5 Asmall;F761 Atilde;00C3 Atildesmall;F7E3 Aybarmenian;0531 B;0042 Bcircle;24B7 Bdotaccent;1E02 Bdotbelow;1E04 Becyrillic;0411 Benarmenian;0532 Beta;0392 Bhook;0181 Blinebelow;1E06 Bmonospace;FF22 Brevesmall;F6F4 Bsmall;F762 Btopbar;0182 C;0043 Caarmenian;053E Cacute;0106 Caron;F6CA Caronsmall;F6F5 Ccaron;010C Ccedilla;00C7 Ccedillaacute;1E08 Ccedillasmall;F7E7 Ccircle;24B8 Ccircumflex;0108 Cdot;010A Cdotaccent;010A Cedillasmall;F7B8 Chaarmenian;0549 Cheabkhasiancyrillic;04BC Checyrillic;0427 Chedescenderabkhasiancyrillic;04BE Chedescendercyrillic;04B6 Chedieresiscyrillic;04F4 Cheharmenian;0543 Chekhakassiancyrillic;04CB Cheverticalstrokecyrillic;04B8 Chi;03A7 Chook;0187 Circumflexsmall;F6F6 Cmonospace;FF23 Coarmenian;0551 Csmall;F763 D;0044 DZ;01F1 DZcaron;01C4 Daarmenian;0534 Dafrican;0189 Dcaron;010E Dcedilla;1E10 Dcircle;24B9 Dcircumflexbelow;1E12 Dcroat;0110 Ddotaccent;1E0A Ddotbelow;1E0C Decyrillic;0414 Deicoptic;03EE Delta;2206 Deltagreek;0394 Dhook;018A Dieresis;F6CB DieresisAcute;F6CC DieresisGrave;F6CD Dieresissmall;F7A8 Digammagreek;03DC Djecyrillic;0402 Dlinebelow;1E0E Dmonospace;FF24 Dotaccentsmall;F6F7 Dslash;0110 Dsmall;F764 Dtopbar;018B Dz;01F2 Dzcaron;01C5 Dzeabkhasiancyrillic;04E0 Dzecyrillic;0405 Dzhecyrillic;040F E;0045 
Eacute;00C9 Eacutesmall;F7E9 Ebreve;0114 Ecaron;011A Ecedillabreve;1E1C Echarmenian;0535 Ecircle;24BA Ecircumflex;00CA Ecircumflexacute;1EBE Ecircumflexbelow;1E18 Ecircumflexdotbelow;1EC6 Ecircumflexgrave;1EC0 Ecircumflexhookabove;1EC2 Ecircumflexsmall;F7EA Ecircumflextilde;1EC4 Ecyrillic;0404 Edblgrave;0204 Edieresis;00CB Edieresissmall;F7EB Edot;0116 Edotaccent;0116 Edotbelow;1EB8 Efcyrillic;0424 Egrave;00C8 Egravesmall;F7E8 Eharmenian;0537 Ehookabove;1EBA Eightroman;2167 Einvertedbreve;0206 Eiotifiedcyrillic;0464 Elcyrillic;041B Elevenroman;216A Emacron;0112 Emacronacute;1E16 Emacrongrave;1E14 Emcyrillic;041C Emonospace;FF25 Encyrillic;041D Endescendercyrillic;04A2 Eng;014A Enghecyrillic;04A4 Enhookcyrillic;04C7 Eogonek;0118 Eopen;0190 Epsilon;0395 Epsilontonos;0388 Ercyrillic;0420 Ereversed;018E Ereversedcyrillic;042D Escyrillic;0421 Esdescendercyrillic;04AA Esh;01A9 Esmall;F765 Eta;0397 Etarmenian;0538 Etatonos;0389 Eth;00D0 Ethsmall;F7F0 Etilde;1EBC Etildebelow;1E1A Euro;20AC Ezh;01B7 Ezhcaron;01EE Ezhreversed;01B8 F;0046 Fcircle;24BB Fdotaccent;1E1E Feharmenian;0556 Feicoptic;03E4 Fhook;0191 Fitacyrillic;0472 Fiveroman;2164 Fmonospace;FF26 Fourroman;2163 Fsmall;F766 G;0047 GBsquare;3387 Gacute;01F4 Gamma;0393 Gammaafrican;0194 Gangiacoptic;03EA Gbreve;011E Gcaron;01E6 Gcedilla;0122 Gcircle;24BC Gcircumflex;011C Gcommaaccent;0122 Gdot;0120 Gdotaccent;0120 Gecyrillic;0413 Ghadarmenian;0542 Ghemiddlehookcyrillic;0494 Ghestrokecyrillic;0492 Gheupturncyrillic;0490 Ghook;0193 Gimarmenian;0533 Gjecyrillic;0403 Gmacron;1E20 Gmonospace;FF27 Grave;F6CE Gravesmall;F760 Gsmall;F767 Gsmallhook;029B Gstroke;01E4 H;0048 H18533;25CF H18543;25AA H18551;25AB H22073;25A1 HPsquare;33CB Haabkhasiancyrillic;04A8 Hadescendercyrillic;04B2 Hardsigncyrillic;042A Hbar;0126 Hbrevebelow;1E2A Hcedilla;1E28 Hcircle;24BD Hcircumflex;0124 Hdieresis;1E26 Hdotaccent;1E22 Hdotbelow;1E24 Hmonospace;FF28 Hoarmenian;0540 Horicoptic;03E8 Hsmall;F768 Hungarumlaut;F6CF Hungarumlautsmall;F6F8 Hzsquare;3390 I;0049 IAcyrillic;042F IJ;0132 IUcyrillic;042E Iacute;00CD Iacutesmall;F7ED Ibreve;012C Icaron;01CF Icircle;24BE Icircumflex;00CE Icircumflexsmall;F7EE Icyrillic;0406 Idblgrave;0208 Idieresis;00CF Idieresisacute;1E2E Idieresiscyrillic;04E4 Idieresissmall;F7EF Idot;0130 Idotaccent;0130 Idotbelow;1ECA Iebrevecyrillic;04D6 Iecyrillic;0415 Ifraktur;2111 Igrave;00CC Igravesmall;F7EC Ihookabove;1EC8 Iicyrillic;0418 Iinvertedbreve;020A Iishortcyrillic;0419 Imacron;012A Imacroncyrillic;04E2 Imonospace;FF29 Iniarmenian;053B Iocyrillic;0401 Iogonek;012E Iota;0399 Iotaafrican;0196 Iotadieresis;03AA Iotatonos;038A Ismall;F769 Istroke;0197 Itilde;0128 Itildebelow;1E2C Izhitsacyrillic;0474 Izhitsadblgravecyrillic;0476 J;004A Jaarmenian;0541 Jcircle;24BF Jcircumflex;0134 Jecyrillic;0408 Jheharmenian;054B Jmonospace;FF2A Jsmall;F76A K;004B KBsquare;3385 KKsquare;33CD Kabashkircyrillic;04A0 Kacute;1E30 Kacyrillic;041A Kadescendercyrillic;049A Kahookcyrillic;04C3 Kappa;039A Kastrokecyrillic;049E Kaverticalstrokecyrillic;049C Kcaron;01E8 Kcedilla;0136 Kcircle;24C0 Kcommaaccent;0136 Kdotbelow;1E32 Keharmenian;0554 Kenarmenian;053F Khacyrillic;0425 Kheicoptic;03E6 Khook;0198 Kjecyrillic;040C Klinebelow;1E34 Kmonospace;FF2B Koppacyrillic;0480 Koppagreek;03DE Ksicyrillic;046E Ksmall;F76B L;004C LJ;01C7 LL;F6BF Lacute;0139 Lambda;039B Lcaron;013D Lcedilla;013B Lcircle;24C1 Lcircumflexbelow;1E3C Lcommaaccent;013B Ldot;013F Ldotaccent;013F Ldotbelow;1E36 Ldotbelowmacron;1E38 Liwnarmenian;053C Lj;01C8 Ljecyrillic;0409 Llinebelow;1E3A Lmonospace;FF2C 
Lslash;0141 Lslashsmall;F6F9 Lsmall;F76C M;004D MBsquare;3386 Macron;F6D0 Macronsmall;F7AF Macute;1E3E Mcircle;24C2 Mdotaccent;1E40 Mdotbelow;1E42 Menarmenian;0544 Mmonospace;FF2D Msmall;F76D Mturned;019C Mu;039C N;004E NJ;01CA Nacute;0143 Ncaron;0147 Ncedilla;0145 Ncircle;24C3 Ncircumflexbelow;1E4A Ncommaaccent;0145 Ndotaccent;1E44 Ndotbelow;1E46 Nhookleft;019D Nineroman;2168 Nj;01CB Njecyrillic;040A Nlinebelow;1E48 Nmonospace;FF2E Nowarmenian;0546 Nsmall;F76E Ntilde;00D1 Ntildesmall;F7F1 Nu;039D O;004F OE;0152 OEsmall;F6FA Oacute;00D3 Oacutesmall;F7F3 Obarredcyrillic;04E8 Obarreddieresiscyrillic;04EA Obreve;014E Ocaron;01D1 Ocenteredtilde;019F Ocircle;24C4 Ocircumflex;00D4 Ocircumflexacute;1ED0 Ocircumflexdotbelow;1ED8 Ocircumflexgrave;1ED2 Ocircumflexhookabove;1ED4 Ocircumflexsmall;F7F4 Ocircumflextilde;1ED6 Ocyrillic;041E Odblacute;0150 Odblgrave;020C Odieresis;00D6 Odieresiscyrillic;04E6 Odieresissmall;F7F6 Odotbelow;1ECC Ogoneksmall;F6FB Ograve;00D2 Ogravesmall;F7F2 Oharmenian;0555 Ohm;2126 Ohookabove;1ECE Ohorn;01A0 Ohornacute;1EDA Ohorndotbelow;1EE2 Ohorngrave;1EDC Ohornhookabove;1EDE Ohorntilde;1EE0 Ohungarumlaut;0150 Oi;01A2 Oinvertedbreve;020E Omacron;014C Omacronacute;1E52 Omacrongrave;1E50 Omega;2126 Omegacyrillic;0460 Omegagreek;03A9 Omegaroundcyrillic;047A Omegatitlocyrillic;047C Omegatonos;038F Omicron;039F Omicrontonos;038C Omonospace;FF2F Oneroman;2160 Oogonek;01EA Oogonekmacron;01EC Oopen;0186 Oslash;00D8 Oslashacute;01FE Oslashsmall;F7F8 Osmall;F76F Ostrokeacute;01FE Otcyrillic;047E Otilde;00D5 Otildeacute;1E4C Otildedieresis;1E4E Otildesmall;F7F5 P;0050 Pacute;1E54 Pcircle;24C5 Pdotaccent;1E56 Pecyrillic;041F Peharmenian;054A Pemiddlehookcyrillic;04A6 Phi;03A6 Phook;01A4 Pi;03A0 Piwrarmenian;0553 Pmonospace;FF30 Psi;03A8 Psicyrillic;0470 Psmall;F770 Q;0051 Qcircle;24C6 Qmonospace;FF31 Qsmall;F771 R;0052 Raarmenian;054C Racute;0154 Rcaron;0158 Rcedilla;0156 Rcircle;24C7 Rcommaaccent;0156 Rdblgrave;0210 Rdotaccent;1E58 Rdotbelow;1E5A Rdotbelowmacron;1E5C Reharmenian;0550 Rfraktur;211C Rho;03A1 Ringsmall;F6FC Rinvertedbreve;0212 Rlinebelow;1E5E Rmonospace;FF32 Rsmall;F772 Rsmallinverted;0281 Rsmallinvertedsuperior;02B6 S;0053 SF010000;250C SF020000;2514 SF030000;2510 SF040000;2518 SF050000;253C SF060000;252C SF070000;2534 SF080000;251C SF090000;2524 SF100000;2500 SF110000;2502 SF190000;2561 SF200000;2562 SF210000;2556 SF220000;2555 SF230000;2563 SF240000;2551 SF250000;2557 SF260000;255D SF270000;255C SF280000;255B SF360000;255E SF370000;255F SF380000;255A SF390000;2554 SF400000;2569 SF410000;2566 SF420000;2560 SF430000;2550 SF440000;256C SF450000;2567 SF460000;2568 SF470000;2564 SF480000;2565 SF490000;2559 SF500000;2558 SF510000;2552 SF520000;2553 SF530000;256B SF540000;256A Sacute;015A Sacutedotaccent;1E64 Sampigreek;03E0 Scaron;0160 Scarondotaccent;1E66 Scaronsmall;F6FD Scedilla;015E Schwa;018F Schwacyrillic;04D8 Schwadieresiscyrillic;04DA Scircle;24C8 Scircumflex;015C Scommaaccent;0218 Sdotaccent;1E60 Sdotbelow;1E62 Sdotbelowdotaccent;1E68 Seharmenian;054D Sevenroman;2166 Shaarmenian;0547 Shacyrillic;0428 Shchacyrillic;0429 Sheicoptic;03E2 Shhacyrillic;04BA Shimacoptic;03EC Sigma;03A3 Sixroman;2165 Smonospace;FF33 Softsigncyrillic;042C Ssmall;F773 Stigmagreek;03DA T;0054 Tau;03A4 Tbar;0166 Tcaron;0164 Tcedilla;0162 Tcircle;24C9 Tcircumflexbelow;1E70 Tcommaaccent;0162 Tdotaccent;1E6A Tdotbelow;1E6C Tecyrillic;0422 Tedescendercyrillic;04AC Tenroman;2169 Tetsecyrillic;04B4 Theta;0398 Thook;01AC Thorn;00DE Thornsmall;F7FE Threeroman;2162 Tildesmall;F6FE Tiwnarmenian;054F 
Tlinebelow;1E6E Tmonospace;FF34 Toarmenian;0539 Tonefive;01BC Tonesix;0184 Tonetwo;01A7 Tretroflexhook;01AE Tsecyrillic;0426 Tshecyrillic;040B Tsmall;F774 Twelveroman;216B Tworoman;2161 U;0055 Uacute;00DA Uacutesmall;F7FA Ubreve;016C Ucaron;01D3 Ucircle;24CA Ucircumflex;00DB Ucircumflexbelow;1E76 Ucircumflexsmall;F7FB Ucyrillic;0423 Udblacute;0170 Udblgrave;0214 Udieresis;00DC Udieresisacute;01D7 Udieresisbelow;1E72 Udieresiscaron;01D9 Udieresiscyrillic;04F0 Udieresisgrave;01DB Udieresismacron;01D5 Udieresissmall;F7FC Udotbelow;1EE4 Ugrave;00D9 Ugravesmall;F7F9 Uhookabove;1EE6 Uhorn;01AF Uhornacute;1EE8 Uhorndotbelow;1EF0 Uhorngrave;1EEA Uhornhookabove;1EEC Uhorntilde;1EEE Uhungarumlaut;0170 Uhungarumlautcyrillic;04F2 Uinvertedbreve;0216 Ukcyrillic;0478 Umacron;016A Umacroncyrillic;04EE Umacrondieresis;1E7A Umonospace;FF35 Uogonek;0172 Upsilon;03A5 Upsilon1;03D2 Upsilonacutehooksymbolgreek;03D3 Upsilonafrican;01B1 Upsilondieresis;03AB Upsilondieresishooksymbolgreek;03D4 Upsilonhooksymbol;03D2 Upsilontonos;038E Uring;016E Ushortcyrillic;040E Usmall;F775 Ustraightcyrillic;04AE Ustraightstrokecyrillic;04B0 Utilde;0168 Utildeacute;1E78 Utildebelow;1E74 V;0056 Vcircle;24CB Vdotbelow;1E7E Vecyrillic;0412 Vewarmenian;054E Vhook;01B2 Vmonospace;FF36 Voarmenian;0548 Vsmall;F776 Vtilde;1E7C W;0057 Wacute;1E82 Wcircle;24CC Wcircumflex;0174 Wdieresis;1E84 Wdotaccent;1E86 Wdotbelow;1E88 Wgrave;1E80 Wmonospace;FF37 Wsmall;F777 X;0058 Xcircle;24CD Xdieresis;1E8C Xdotaccent;1E8A Xeharmenian;053D Xi;039E Xmonospace;FF38 Xsmall;F778 Y;0059 Yacute;00DD Yacutesmall;F7FD Yatcyrillic;0462 Ycircle;24CE Ycircumflex;0176 Ydieresis;0178 Ydieresissmall;F7FF Ydotaccent;1E8E Ydotbelow;1EF4 Yericyrillic;042B Yerudieresiscyrillic;04F8 Ygrave;1EF2 Yhook;01B3 Yhookabove;1EF6 Yiarmenian;0545 Yicyrillic;0407 Yiwnarmenian;0552 Ymonospace;FF39 Ysmall;F779 Ytilde;1EF8 Yusbigcyrillic;046A Yusbigiotifiedcyrillic;046C Yuslittlecyrillic;0466 Yuslittleiotifiedcyrillic;0468 Z;005A Zaarmenian;0536 Zacute;0179 Zcaron;017D Zcaronsmall;F6FF Zcircle;24CF Zcircumflex;1E90 Zdot;017B Zdotaccent;017B Zdotbelow;1E92 Zecyrillic;0417 Zedescendercyrillic;0498 Zedieresiscyrillic;04DE Zeta;0396 Zhearmenian;053A Zhebrevecyrillic;04C1 Zhecyrillic;0416 Zhedescendercyrillic;0496 Zhedieresiscyrillic;04DC Zlinebelow;1E94 Zmonospace;FF3A Zsmall;F77A Zstroke;01B5 a;0061 aabengali;0986 aacute;00E1 aadeva;0906 aagujarati;0A86 aagurmukhi;0A06 aamatragurmukhi;0A3E aarusquare;3303 aavowelsignbengali;09BE aavowelsigndeva;093E aavowelsigngujarati;0ABE abbreviationmarkarmenian;055F abbreviationsigndeva;0970 abengali;0985 abopomofo;311A abreve;0103 abreveacute;1EAF abrevecyrillic;04D1 abrevedotbelow;1EB7 abrevegrave;1EB1 abrevehookabove;1EB3 abrevetilde;1EB5 acaron;01CE acircle;24D0 acircumflex;00E2 acircumflexacute;1EA5 acircumflexdotbelow;1EAD acircumflexgrave;1EA7 acircumflexhookabove;1EA9 acircumflextilde;1EAB acute;00B4 acutebelowcmb;0317 acutecmb;0301 acutecomb;0301 acutedeva;0954 acutelowmod;02CF acutetonecmb;0341 acyrillic;0430 adblgrave;0201 addakgurmukhi;0A71 adeva;0905 adieresis;00E4 adieresiscyrillic;04D3 adieresismacron;01DF adotbelow;1EA1 adotmacron;01E1 ae;00E6 aeacute;01FD aekorean;3150 aemacron;01E3 afii00208;2015 afii08941;20A4 afii10017;0410 afii10018;0411 afii10019;0412 afii10020;0413 afii10021;0414 afii10022;0415 afii10023;0401 afii10024;0416 afii10025;0417 afii10026;0418 afii10027;0419 afii10028;041A afii10029;041B afii10030;041C afii10031;041D afii10032;041E afii10033;041F afii10034;0420 afii10035;0421 afii10036;0422 afii10037;0423 
afii10038;0424 afii10039;0425 afii10040;0426 afii10041;0427 afii10042;0428 afii10043;0429 afii10044;042A afii10045;042B afii10046;042C afii10047;042D afii10048;042E afii10049;042F afii10050;0490 afii10051;0402 afii10052;0403 afii10053;0404 afii10054;0405 afii10055;0406 afii10056;0407 afii10057;0408 afii10058;0409 afii10059;040A afii10060;040B afii10061;040C afii10062;040E afii10063;F6C4 afii10064;F6C5 afii10065;0430 afii10066;0431 afii10067;0432 afii10068;0433 afii10069;0434 afii10070;0435 afii10071;0451 afii10072;0436 afii10073;0437 afii10074;0438 afii10075;0439 afii10076;043A afii10077;043B afii10078;043C afii10079;043D afii10080;043E afii10081;043F afii10082;0440 afii10083;0441 afii10084;0442 afii10085;0443 afii10086;0444 afii10087;0445 afii10088;0446 afii10089;0447 afii10090;0448 afii10091;0449 afii10092;044A afii10093;044B afii10094;044C afii10095;044D afii10096;044E afii10097;044F afii10098;0491 afii10099;0452 afii10100;0453 afii10101;0454 afii10102;0455 afii10103;0456 afii10104;0457 afii10105;0458 afii10106;0459 afii10107;045A afii10108;045B afii10109;045C afii10110;045E afii10145;040F afii10146;0462 afii10147;0472 afii10148;0474 afii10192;F6C6 afii10193;045F afii10194;0463 afii10195;0473 afii10196;0475 afii10831;F6C7 afii10832;F6C8 afii10846;04D9 afii299;200E afii300;200F afii301;200D afii57381;066A afii57388;060C afii57392;0660 afii57393;0661 afii57394;0662 afii57395;0663 afii57396;0664 afii57397;0665 afii57398;0666 afii57399;0667 afii57400;0668 afii57401;0669 afii57403;061B afii57407;061F afii57409;0621 afii57410;0622 afii57411;0623 afii57412;0624 afii57413;0625 afii57414;0626 afii57415;0627 afii57416;0628 afii57417;0629 afii57418;062A afii57419;062B afii57420;062C afii57421;062D afii57422;062E afii57423;062F afii57424;0630 afii57425;0631 afii57426;0632 afii57427;0633 afii57428;0634 afii57429;0635 afii57430;0636 afii57431;0637 afii57432;0638 afii57433;0639 afii57434;063A afii57440;0640 afii57441;0641 afii57442;0642 afii57443;0643 afii57444;0644 afii57445;0645 afii57446;0646 afii57448;0648 afii57449;0649 afii57450;064A afii57451;064B afii57452;064C afii57453;064D afii57454;064E afii57455;064F afii57456;0650 afii57457;0651 afii57458;0652 afii57470;0647 afii57505;06A4 afii57506;067E afii57507;0686 afii57508;0698 afii57509;06AF afii57511;0679 afii57512;0688 afii57513;0691 afii57514;06BA afii57519;06D2 afii57534;06D5 afii57636;20AA afii57645;05BE afii57658;05C3 afii57664;05D0 afii57665;05D1 afii57666;05D2 afii57667;05D3 afii57668;05D4 afii57669;05D5 afii57670;05D6 afii57671;05D7 afii57672;05D8 afii57673;05D9 afii57674;05DA afii57675;05DB afii57676;05DC afii57677;05DD afii57678;05DE afii57679;05DF afii57680;05E0 afii57681;05E1 afii57682;05E2 afii57683;05E3 afii57684;05E4 afii57685;05E5 afii57686;05E6 afii57687;05E7 afii57688;05E8 afii57689;05E9 afii57690;05EA afii57694;FB2A afii57695;FB2B afii57700;FB4B afii57705;FB1F afii57716;05F0 afii57717;05F1 afii57718;05F2 afii57723;FB35 afii57793;05B4 afii57794;05B5 afii57795;05B6 afii57796;05BB afii57797;05B8 afii57798;05B7 afii57799;05B0 afii57800;05B2 afii57801;05B1 afii57802;05B3 afii57803;05C2 afii57804;05C1 afii57806;05B9 afii57807;05BC afii57839;05BD afii57841;05BF afii57842;05C0 afii57929;02BC afii61248;2105 afii61289;2113 afii61352;2116 afii61573;202C afii61574;202D afii61575;202E afii61664;200C afii63167;066D afii64937;02BD agrave;00E0 agujarati;0A85 agurmukhi;0A05 ahiragana;3042 ahookabove;1EA3 aibengali;0990 aibopomofo;311E aideva;0910 aiecyrillic;04D5 aigujarati;0A90 aigurmukhi;0A10 aimatragurmukhi;0A48 ainarabic;0639 
ainfinalarabic;FECA aininitialarabic;FECB ainmedialarabic;FECC ainvertedbreve;0203 aivowelsignbengali;09C8 aivowelsigndeva;0948 aivowelsigngujarati;0AC8 akatakana;30A2 akatakanahalfwidth;FF71 akorean;314F alef;05D0 alefarabic;0627 alefdageshhebrew;FB30 aleffinalarabic;FE8E alefhamzaabovearabic;0623 alefhamzaabovefinalarabic;FE84 alefhamzabelowarabic;0625 alefhamzabelowfinalarabic;FE88 alefhebrew;05D0 aleflamedhebrew;FB4F alefmaddaabovearabic;0622 alefmaddaabovefinalarabic;FE82 alefmaksuraarabic;0649 alefmaksurafinalarabic;FEF0 alefmaksurainitialarabic;FEF3 alefmaksuramedialarabic;FEF4 alefpatahhebrew;FB2E alefqamatshebrew;FB2F aleph;2135 allequal;224C alpha;03B1 alphatonos;03AC amacron;0101 amonospace;FF41 ampersand;0026 ampersandmonospace;FF06 ampersandsmall;F726 amsquare;33C2 anbopomofo;3122 angbopomofo;3124 angkhankhuthai;0E5A angle;2220 anglebracketleft;3008 anglebracketleftvertical;FE3F anglebracketright;3009 anglebracketrightvertical;FE40 angleleft;2329 angleright;232A angstrom;212B anoteleia;0387 anudattadeva;0952 anusvarabengali;0982 anusvaradeva;0902 anusvaragujarati;0A82 aogonek;0105 apaatosquare;3300 aparen;249C apostrophearmenian;055A apostrophemod;02BC apple;F8FF approaches;2250 approxequal;2248 approxequalorimage;2252 approximatelyequal;2245 araeaekorean;318E araeakorean;318D arc;2312 arighthalfring;1E9A aring;00E5 aringacute;01FB aringbelow;1E01 arrowboth;2194 arrowdashdown;21E3 arrowdashleft;21E0 arrowdashright;21E2 arrowdashup;21E1 arrowdblboth;21D4 arrowdbldown;21D3 arrowdblleft;21D0 arrowdblright;21D2 arrowdblup;21D1 arrowdown;2193 arrowdownleft;2199 arrowdownright;2198 arrowdownwhite;21E9 arrowheaddownmod;02C5 arrowheadleftmod;02C2 arrowheadrightmod;02C3 arrowheadupmod;02C4 arrowhorizex;F8E7 arrowleft;2190 arrowleftdbl;21D0 arrowleftdblstroke;21CD arrowleftoverright;21C6 arrowleftwhite;21E6 arrowright;2192 arrowrightdblstroke;21CF arrowrightheavy;279E arrowrightoverleft;21C4 arrowrightwhite;21E8 arrowtableft;21E4 arrowtabright;21E5 arrowup;2191 arrowupdn;2195 arrowupdnbse;21A8 arrowupdownbase;21A8 arrowupleft;2196 arrowupleftofdown;21C5 arrowupright;2197 arrowupwhite;21E7 arrowvertex;F8E6 asciicircum;005E asciicircummonospace;FF3E asciitilde;007E asciitildemonospace;FF5E ascript;0251 ascriptturned;0252 asmallhiragana;3041 asmallkatakana;30A1 asmallkatakanahalfwidth;FF67 asterisk;002A asteriskaltonearabic;066D asteriskarabic;066D asteriskmath;2217 asteriskmonospace;FF0A asterisksmall;FE61 asterism;2042 asuperior;F6E9 asymptoticallyequal;2243 at;0040 atilde;00E3 atmonospace;FF20 atsmall;FE6B aturned;0250 aubengali;0994 aubopomofo;3120 audeva;0914 augujarati;0A94 augurmukhi;0A14 aulengthmarkbengali;09D7 aumatragurmukhi;0A4C auvowelsignbengali;09CC auvowelsigndeva;094C auvowelsigngujarati;0ACC avagrahadeva;093D aybarmenian;0561 ayin;05E2 ayinaltonehebrew;FB20 ayinhebrew;05E2 b;0062 babengali;09AC backslash;005C backslashmonospace;FF3C badeva;092C bagujarati;0AAC bagurmukhi;0A2C bahiragana;3070 bahtthai;0E3F bakatakana;30D0 bar;007C barmonospace;FF5C bbopomofo;3105 bcircle;24D1 bdotaccent;1E03 bdotbelow;1E05 beamedsixteenthnotes;266C because;2235 becyrillic;0431 beharabic;0628 behfinalarabic;FE90 behinitialarabic;FE91 behiragana;3079 behmedialarabic;FE92 behmeeminitialarabic;FC9F behmeemisolatedarabic;FC08 behnoonfinalarabic;FC6D bekatakana;30D9 benarmenian;0562 bet;05D1 beta;03B2 betasymbolgreek;03D0 betdagesh;FB31 betdageshhebrew;FB31 bethebrew;05D1 betrafehebrew;FB4C bhabengali;09AD bhadeva;092D bhagujarati;0AAD bhagurmukhi;0A2D bhook;0253 bihiragana;3073 bikatakana;30D3 
bilabialclick;0298 bindigurmukhi;0A02 birusquare;3331 blackcircle;25CF blackdiamond;25C6 blackdownpointingtriangle;25BC blackleftpointingpointer;25C4 blackleftpointingtriangle;25C0 blacklenticularbracketleft;3010 blacklenticularbracketleftvertical;FE3B blacklenticularbracketright;3011 blacklenticularbracketrightvertical;FE3C blacklowerlefttriangle;25E3 blacklowerrighttriangle;25E2 blackrectangle;25AC blackrightpointingpointer;25BA blackrightpointingtriangle;25B6 blacksmallsquare;25AA blacksmilingface;263B blacksquare;25A0 blackstar;2605 blackupperlefttriangle;25E4 blackupperrighttriangle;25E5 blackuppointingsmalltriangle;25B4 blackuppointingtriangle;25B2 blank;2423 blinebelow;1E07 block;2588 bmonospace;FF42 bobaimaithai;0E1A bohiragana;307C bokatakana;30DC bparen;249D bqsquare;33C3 braceex;F8F4 braceleft;007B braceleftbt;F8F3 braceleftmid;F8F2 braceleftmonospace;FF5B braceleftsmall;FE5B bracelefttp;F8F1 braceleftvertical;FE37 braceright;007D bracerightbt;F8FE bracerightmid;F8FD bracerightmonospace;FF5D bracerightsmall;FE5C bracerighttp;F8FC bracerightvertical;FE38 bracketleft;005B bracketleftbt;F8F0 bracketleftex;F8EF bracketleftmonospace;FF3B bracketlefttp;F8EE bracketright;005D bracketrightbt;F8FB bracketrightex;F8FA bracketrightmonospace;FF3D bracketrighttp;F8F9 breve;02D8 brevebelowcmb;032E brevecmb;0306 breveinvertedbelowcmb;032F breveinvertedcmb;0311 breveinverteddoublecmb;0361 bridgebelowcmb;032A bridgeinvertedbelowcmb;033A brokenbar;00A6 bstroke;0180 bsuperior;F6EA btopbar;0183 buhiragana;3076 bukatakana;30D6 bullet;2022 bulletinverse;25D8 bulletoperator;2219 bullseye;25CE c;0063 caarmenian;056E cabengali;099A cacute;0107 cadeva;091A cagujarati;0A9A cagurmukhi;0A1A calsquare;3388 candrabindubengali;0981 candrabinducmb;0310 candrabindudeva;0901 candrabindugujarati;0A81 capslock;21EA careof;2105 caron;02C7 caronbelowcmb;032C caroncmb;030C carriagereturn;21B5 cbopomofo;3118 ccaron;010D ccedilla;00E7 ccedillaacute;1E09 ccircle;24D2 ccircumflex;0109 ccurl;0255 cdot;010B cdotaccent;010B cdsquare;33C5 cedilla;00B8 cedillacmb;0327 cent;00A2 centigrade;2103 centinferior;F6DF centmonospace;FFE0 centoldstyle;F7A2 centsuperior;F6E0 chaarmenian;0579 chabengali;099B chadeva;091B chagujarati;0A9B chagurmukhi;0A1B chbopomofo;3114 cheabkhasiancyrillic;04BD checkmark;2713 checyrillic;0447 chedescenderabkhasiancyrillic;04BF chedescendercyrillic;04B7 chedieresiscyrillic;04F5 cheharmenian;0573 chekhakassiancyrillic;04CC cheverticalstrokecyrillic;04B9 chi;03C7 chieuchacirclekorean;3277 chieuchaparenkorean;3217 chieuchcirclekorean;3269 chieuchkorean;314A chieuchparenkorean;3209 chochangthai;0E0A chochanthai;0E08 chochingthai;0E09 chochoethai;0E0C chook;0188 cieucacirclekorean;3276 cieucaparenkorean;3216 cieuccirclekorean;3268 cieuckorean;3148 cieucparenkorean;3208 cieucuparenkorean;321C circle;25CB circlemultiply;2297 circleot;2299 circleplus;2295 circlepostalmark;3036 circlewithlefthalfblack;25D0 circlewithrighthalfblack;25D1 circumflex;02C6 circumflexbelowcmb;032D circumflexcmb;0302 clear;2327 clickalveolar;01C2 clickdental;01C0 clicklateral;01C1 clickretroflex;01C3 club;2663 clubsuitblack;2663 clubsuitwhite;2667 cmcubedsquare;33A4 cmonospace;FF43 cmsquaredsquare;33A0 coarmenian;0581 colon;003A colonmonetary;20A1 colonmonospace;FF1A colonsign;20A1 colonsmall;FE55 colontriangularhalfmod;02D1 colontriangularmod;02D0 comma;002C commaabovecmb;0313 commaaboverightcmb;0315 commaaccent;F6C3 commaarabic;060C commaarmenian;055D commainferior;F6E1 commamonospace;FF0C commareversedabovecmb;0314 
commareversedmod;02BD commasmall;FE50 commasuperior;F6E2 commaturnedabovecmb;0312 commaturnedmod;02BB compass;263C congruent;2245 contourintegral;222E control;2303 controlACK;0006 controlBEL;0007 controlBS;0008 controlCAN;0018 controlCR;000D controlDC1;0011 controlDC2;0012 controlDC3;0013 controlDC4;0014 controlDEL;007F controlDLE;0010 controlEM;0019 controlENQ;0005 controlEOT;0004 controlESC;001B controlETB;0017 controlETX;0003 controlFF;000C controlFS;001C controlGS;001D controlHT;0009 controlLF;000A controlNAK;0015 controlRS;001E controlSI;000F controlSO;000E controlSOT;0002 controlSTX;0001 controlSUB;001A controlSYN;0016 controlUS;001F controlVT;000B copyright;00A9 copyrightsans;F8E9 copyrightserif;F6D9 cornerbracketleft;300C cornerbracketlefthalfwidth;FF62 cornerbracketleftvertical;FE41 cornerbracketright;300D cornerbracketrighthalfwidth;FF63 cornerbracketrightvertical;FE42 corporationsquare;337F cosquare;33C7 coverkgsquare;33C6 cparen;249E cruzeiro;20A2 cstretched;0297 curlyand;22CF curlyor;22CE currency;00A4 cyrBreve;F6D1 cyrFlex;F6D2 cyrbreve;F6D4 cyrflex;F6D5 d;0064 daarmenian;0564 dabengali;09A6 dadarabic;0636 dadeva;0926 dadfinalarabic;FEBE dadinitialarabic;FEBF dadmedialarabic;FEC0 dagesh;05BC dageshhebrew;05BC dagger;2020 daggerdbl;2021 dagujarati;0AA6 dagurmukhi;0A26 dahiragana;3060 dakatakana;30C0 dalarabic;062F dalet;05D3 daletdagesh;FB33 daletdageshhebrew;FB33 dalethatafpatah;05D3 05B2 dalethatafpatahhebrew;05D3 05B2 dalethatafsegol;05D3 05B1 dalethatafsegolhebrew;05D3 05B1 dalethebrew;05D3 dalethiriq;05D3 05B4 dalethiriqhebrew;05D3 05B4 daletholam;05D3 05B9 daletholamhebrew;05D3 05B9 daletpatah;05D3 05B7 daletpatahhebrew;05D3 05B7 daletqamats;05D3 05B8 daletqamatshebrew;05D3 05B8 daletqubuts;05D3 05BB daletqubutshebrew;05D3 05BB daletsegol;05D3 05B6 daletsegolhebrew;05D3 05B6 daletsheva;05D3 05B0 daletshevahebrew;05D3 05B0 dalettsere;05D3 05B5 dalettserehebrew;05D3 05B5 dalfinalarabic;FEAA dammaarabic;064F dammalowarabic;064F dammatanaltonearabic;064C dammatanarabic;064C danda;0964 dargahebrew;05A7 dargalefthebrew;05A7 dasiapneumatacyrilliccmb;0485 dblGrave;F6D3 dblanglebracketleft;300A dblanglebracketleftvertical;FE3D dblanglebracketright;300B dblanglebracketrightvertical;FE3E dblarchinvertedbelowcmb;032B dblarrowleft;21D4 dblarrowright;21D2 dbldanda;0965 dblgrave;F6D6 dblgravecmb;030F dblintegral;222C dbllowline;2017 dbllowlinecmb;0333 dbloverlinecmb;033F dblprimemod;02BA dblverticalbar;2016 dblverticallineabovecmb;030E dbopomofo;3109 dbsquare;33C8 dcaron;010F dcedilla;1E11 dcircle;24D3 dcircumflexbelow;1E13 dcroat;0111 ddabengali;09A1 ddadeva;0921 ddagujarati;0AA1 ddagurmukhi;0A21 ddalarabic;0688 ddalfinalarabic;FB89 dddhadeva;095C ddhabengali;09A2 ddhadeva;0922 ddhagujarati;0AA2 ddhagurmukhi;0A22 ddotaccent;1E0B ddotbelow;1E0D decimalseparatorarabic;066B decimalseparatorpersian;066B decyrillic;0434 degree;00B0 dehihebrew;05AD dehiragana;3067 deicoptic;03EF dekatakana;30C7 deleteleft;232B deleteright;2326 delta;03B4 deltaturned;018D denominatorminusonenumeratorbengali;09F8 dezh;02A4 dhabengali;09A7 dhadeva;0927 dhagujarati;0AA7 dhagurmukhi;0A27 dhook;0257 dialytikatonos;0385 dialytikatonoscmb;0344 diamond;2666 diamondsuitwhite;2662 dieresis;00A8 dieresisacute;F6D7 dieresisbelowcmb;0324 dieresiscmb;0308 dieresisgrave;F6D8 dieresistonos;0385 dihiragana;3062 dikatakana;30C2 dittomark;3003 divide;00F7 divides;2223 divisionslash;2215 djecyrillic;0452 dkshade;2593 dlinebelow;1E0F dlsquare;3397 dmacron;0111 dmonospace;FF44 dnblock;2584 dochadathai;0E0E dodekthai;0E14 
dohiragana;3069 dokatakana;30C9 dollar;0024 dollarinferior;F6E3 dollarmonospace;FF04 dollaroldstyle;F724 dollarsmall;FE69 dollarsuperior;F6E4 dong;20AB dorusquare;3326 dotaccent;02D9 dotaccentcmb;0307 dotbelowcmb;0323 dotbelowcomb;0323 dotkatakana;30FB dotlessi;0131 dotlessj;F6BE dotlessjstrokehook;0284 dotmath;22C5 dottedcircle;25CC doubleyodpatah;FB1F doubleyodpatahhebrew;FB1F downtackbelowcmb;031E downtackmod;02D5 dparen;249F dsuperior;F6EB dtail;0256 dtopbar;018C duhiragana;3065 dukatakana;30C5 dz;01F3 dzaltone;02A3 dzcaron;01C6 dzcurl;02A5 dzeabkhasiancyrillic;04E1 dzecyrillic;0455 dzhecyrillic;045F e;0065 eacute;00E9 earth;2641 ebengali;098F ebopomofo;311C ebreve;0115 ecandradeva;090D ecandragujarati;0A8D ecandravowelsigndeva;0945 ecandravowelsigngujarati;0AC5 ecaron;011B ecedillabreve;1E1D echarmenian;0565 echyiwnarmenian;0587 ecircle;24D4 ecircumflex;00EA ecircumflexacute;1EBF ecircumflexbelow;1E19 ecircumflexdotbelow;1EC7 ecircumflexgrave;1EC1 ecircumflexhookabove;1EC3 ecircumflextilde;1EC5 ecyrillic;0454 edblgrave;0205 edeva;090F edieresis;00EB edot;0117 edotaccent;0117 edotbelow;1EB9 eegurmukhi;0A0F eematragurmukhi;0A47 efcyrillic;0444 egrave;00E8 egujarati;0A8F eharmenian;0567 ehbopomofo;311D ehiragana;3048 ehookabove;1EBB eibopomofo;311F eight;0038 eightarabic;0668 eightbengali;09EE eightcircle;2467 eightcircleinversesansserif;2791 eightdeva;096E eighteencircle;2471 eighteenparen;2485 eighteenperiod;2499 eightgujarati;0AEE eightgurmukhi;0A6E eighthackarabic;0668 eighthangzhou;3028 eighthnotebeamed;266B eightideographicparen;3227 eightinferior;2088 eightmonospace;FF18 eightoldstyle;F738 eightparen;247B eightperiod;248F eightpersian;06F8 eightroman;2177 eightsuperior;2078 eightthai;0E58 einvertedbreve;0207 eiotifiedcyrillic;0465 ekatakana;30A8 ekatakanahalfwidth;FF74 ekonkargurmukhi;0A74 ekorean;3154 elcyrillic;043B element;2208 elevencircle;246A elevenparen;247E elevenperiod;2492 elevenroman;217A ellipsis;2026 ellipsisvertical;22EE emacron;0113 emacronacute;1E17 emacrongrave;1E15 emcyrillic;043C emdash;2014 emdashvertical;FE31 emonospace;FF45 emphasismarkarmenian;055B emptyset;2205 enbopomofo;3123 encyrillic;043D endash;2013 endashvertical;FE32 endescendercyrillic;04A3 eng;014B engbopomofo;3125 enghecyrillic;04A5 enhookcyrillic;04C8 enspace;2002 eogonek;0119 eokorean;3153 eopen;025B eopenclosed;029A eopenreversed;025C eopenreversedclosed;025E eopenreversedhook;025D eparen;24A0 epsilon;03B5 epsilontonos;03AD equal;003D equalmonospace;FF1D equalsmall;FE66 equalsuperior;207C equivalence;2261 erbopomofo;3126 ercyrillic;0440 ereversed;0258 ereversedcyrillic;044D escyrillic;0441 esdescendercyrillic;04AB esh;0283 eshcurl;0286 eshortdeva;090E eshortvowelsigndeva;0946 eshreversedloop;01AA eshsquatreversed;0285 esmallhiragana;3047 esmallkatakana;30A7 esmallkatakanahalfwidth;FF6A estimated;212E esuperior;F6EC eta;03B7 etarmenian;0568 etatonos;03AE eth;00F0 etilde;1EBD etildebelow;1E1B etnahtafoukhhebrew;0591 etnahtafoukhlefthebrew;0591 etnahtahebrew;0591 etnahtalefthebrew;0591 eturned;01DD eukorean;3161 euro;20AC evowelsignbengali;09C7 evowelsigndeva;0947 evowelsigngujarati;0AC7 exclam;0021 exclamarmenian;055C exclamdbl;203C exclamdown;00A1 exclamdownsmall;F7A1 exclammonospace;FF01 exclamsmall;F721 existential;2203 ezh;0292 ezhcaron;01EF ezhcurl;0293 ezhreversed;01B9 ezhtail;01BA f;0066 fadeva;095E fagurmukhi;0A5E fahrenheit;2109 fathaarabic;064E fathalowarabic;064E fathatanarabic;064B fbopomofo;3108 fcircle;24D5 fdotaccent;1E1F feharabic;0641 feharmenian;0586 fehfinalarabic;FED2 
fehinitialarabic;FED3 fehmedialarabic;FED4 feicoptic;03E5 female;2640 ff;FB00 ffi;FB03 ffl;FB04 fi;FB01 fifteencircle;246E fifteenparen;2482 fifteenperiod;2496 figuredash;2012 filledbox;25A0 filledrect;25AC finalkaf;05DA finalkafdagesh;FB3A finalkafdageshhebrew;FB3A finalkafhebrew;05DA finalkafqamats;05DA 05B8 finalkafqamatshebrew;05DA 05B8 finalkafsheva;05DA 05B0 finalkafshevahebrew;05DA 05B0 finalmem;05DD finalmemhebrew;05DD finalnun;05DF finalnunhebrew;05DF finalpe;05E3 finalpehebrew;05E3 finaltsadi;05E5 finaltsadihebrew;05E5 firsttonechinese;02C9 fisheye;25C9 fitacyrillic;0473 five;0035 fivearabic;0665 fivebengali;09EB fivecircle;2464 fivecircleinversesansserif;278E fivedeva;096B fiveeighths;215D fivegujarati;0AEB fivegurmukhi;0A6B fivehackarabic;0665 fivehangzhou;3025 fiveideographicparen;3224 fiveinferior;2085 fivemonospace;FF15 fiveoldstyle;F735 fiveparen;2478 fiveperiod;248C fivepersian;06F5 fiveroman;2174 fivesuperior;2075 fivethai;0E55 fl;FB02 florin;0192 fmonospace;FF46 fmsquare;3399 fofanthai;0E1F fofathai;0E1D fongmanthai;0E4F forall;2200 four;0034 fourarabic;0664 fourbengali;09EA fourcircle;2463 fourcircleinversesansserif;278D fourdeva;096A fourgujarati;0AEA fourgurmukhi;0A6A fourhackarabic;0664 fourhangzhou;3024 fourideographicparen;3223 fourinferior;2084 fourmonospace;FF14 fournumeratorbengali;09F7 fouroldstyle;F734 fourparen;2477 fourperiod;248B fourpersian;06F4 fourroman;2173 foursuperior;2074 fourteencircle;246D fourteenparen;2481 fourteenperiod;2495 fourthai;0E54 fourthtonechinese;02CB fparen;24A1 fraction;2044 franc;20A3 g;0067 gabengali;0997 gacute;01F5 gadeva;0917 gafarabic;06AF gaffinalarabic;FB93 gafinitialarabic;FB94 gafmedialarabic;FB95 gagujarati;0A97 gagurmukhi;0A17 gahiragana;304C gakatakana;30AC gamma;03B3 gammalatinsmall;0263 gammasuperior;02E0 gangiacoptic;03EB gbopomofo;310D gbreve;011F gcaron;01E7 gcedilla;0123 gcircle;24D6 gcircumflex;011D gcommaaccent;0123 gdot;0121 gdotaccent;0121 gecyrillic;0433 gehiragana;3052 gekatakana;30B2 geometricallyequal;2251 gereshaccenthebrew;059C gereshhebrew;05F3 gereshmuqdamhebrew;059D germandbls;00DF gershayimaccenthebrew;059E gershayimhebrew;05F4 getamark;3013 ghabengali;0998 ghadarmenian;0572 ghadeva;0918 ghagujarati;0A98 ghagurmukhi;0A18 ghainarabic;063A ghainfinalarabic;FECE ghaininitialarabic;FECF ghainmedialarabic;FED0 ghemiddlehookcyrillic;0495 ghestrokecyrillic;0493 gheupturncyrillic;0491 ghhadeva;095A ghhagurmukhi;0A5A ghook;0260 ghzsquare;3393 gihiragana;304E gikatakana;30AE gimarmenian;0563 gimel;05D2 gimeldagesh;FB32 gimeldageshhebrew;FB32 gimelhebrew;05D2 gjecyrillic;0453 glottalinvertedstroke;01BE glottalstop;0294 glottalstopinverted;0296 glottalstopmod;02C0 glottalstopreversed;0295 glottalstopreversedmod;02C1 glottalstopreversedsuperior;02E4 glottalstopstroke;02A1 glottalstopstrokereversed;02A2 gmacron;1E21 gmonospace;FF47 gohiragana;3054 gokatakana;30B4 gparen;24A2 gpasquare;33AC gradient;2207 grave;0060 gravebelowcmb;0316 gravecmb;0300 gravecomb;0300 gravedeva;0953 gravelowmod;02CE gravemonospace;FF40 gravetonecmb;0340 greater;003E greaterequal;2265 greaterequalorless;22DB greatermonospace;FF1E greaterorequivalent;2273 greaterorless;2277 greateroverequal;2267 greatersmall;FE65 gscript;0261 gstroke;01E5 guhiragana;3050 guillemotleft;00AB guillemotright;00BB guilsinglleft;2039 guilsinglright;203A gukatakana;30B0 guramusquare;3318 gysquare;33C9 h;0068 haabkhasiancyrillic;04A9 haaltonearabic;06C1 habengali;09B9 hadescendercyrillic;04B3 hadeva;0939 hagujarati;0AB9 hagurmukhi;0A39 haharabic;062D 
hahfinalarabic;FEA2 hahinitialarabic;FEA3 hahiragana;306F hahmedialarabic;FEA4 haitusquare;332A hakatakana;30CF hakatakanahalfwidth;FF8A halantgurmukhi;0A4D hamzaarabic;0621 hamzadammaarabic;0621 064F hamzadammatanarabic;0621 064C hamzafathaarabic;0621 064E hamzafathatanarabic;0621 064B hamzalowarabic;0621 hamzalowkasraarabic;0621 0650 hamzalowkasratanarabic;0621 064D hamzasukunarabic;0621 0652 hangulfiller;3164 hardsigncyrillic;044A harpoonleftbarbup;21BC harpoonrightbarbup;21C0 hasquare;33CA hatafpatah;05B2 hatafpatah16;05B2 hatafpatah23;05B2 hatafpatah2f;05B2 hatafpatahhebrew;05B2 hatafpatahnarrowhebrew;05B2 hatafpatahquarterhebrew;05B2 hatafpatahwidehebrew;05B2 hatafqamats;05B3 hatafqamats1b;05B3 hatafqamats28;05B3 hatafqamats34;05B3 hatafqamatshebrew;05B3 hatafqamatsnarrowhebrew;05B3 hatafqamatsquarterhebrew;05B3 hatafqamatswidehebrew;05B3 hatafsegol;05B1 hatafsegol17;05B1 hatafsegol24;05B1 hatafsegol30;05B1 hatafsegolhebrew;05B1 hatafsegolnarrowhebrew;05B1 hatafsegolquarterhebrew;05B1 hatafsegolwidehebrew;05B1 hbar;0127 hbopomofo;310F hbrevebelow;1E2B hcedilla;1E29 hcircle;24D7 hcircumflex;0125 hdieresis;1E27 hdotaccent;1E23 hdotbelow;1E25 he;05D4 heart;2665 heartsuitblack;2665 heartsuitwhite;2661 hedagesh;FB34 hedageshhebrew;FB34 hehaltonearabic;06C1 heharabic;0647 hehebrew;05D4 hehfinalaltonearabic;FBA7 hehfinalalttwoarabic;FEEA hehfinalarabic;FEEA hehhamzaabovefinalarabic;FBA5 hehhamzaaboveisolatedarabic;FBA4 hehinitialaltonearabic;FBA8 hehinitialarabic;FEEB hehiragana;3078 hehmedialaltonearabic;FBA9 hehmedialarabic;FEEC heiseierasquare;337B hekatakana;30D8 hekatakanahalfwidth;FF8D hekutaarusquare;3336 henghook;0267 herutusquare;3339 het;05D7 hethebrew;05D7 hhook;0266 hhooksuperior;02B1 hieuhacirclekorean;327B hieuhaparenkorean;321B hieuhcirclekorean;326D hieuhkorean;314E hieuhparenkorean;320D hihiragana;3072 hikatakana;30D2 hikatakanahalfwidth;FF8B hiriq;05B4 hiriq14;05B4 hiriq21;05B4 hiriq2d;05B4 hiriqhebrew;05B4 hiriqnarrowhebrew;05B4 hiriqquarterhebrew;05B4 hiriqwidehebrew;05B4 hlinebelow;1E96 hmonospace;FF48 hoarmenian;0570 hohipthai;0E2B hohiragana;307B hokatakana;30DB hokatakanahalfwidth;FF8E holam;05B9 holam19;05B9 holam26;05B9 holam32;05B9 holamhebrew;05B9 holamnarrowhebrew;05B9 holamquarterhebrew;05B9 holamwidehebrew;05B9 honokhukthai;0E2E hookabovecomb;0309 hookcmb;0309 hookpalatalizedbelowcmb;0321 hookretroflexbelowcmb;0322 hoonsquare;3342 horicoptic;03E9 horizontalbar;2015 horncmb;031B hotsprings;2668 house;2302 hparen;24A3 hsuperior;02B0 hturned;0265 huhiragana;3075 huiitosquare;3333 hukatakana;30D5 hukatakanahalfwidth;FF8C hungarumlaut;02DD hungarumlautcmb;030B hv;0195 hyphen;002D hypheninferior;F6E5 hyphenmonospace;FF0D hyphensmall;FE63 hyphensuperior;F6E6 hyphentwo;2010 i;0069 iacute;00ED iacyrillic;044F ibengali;0987 ibopomofo;3127 ibreve;012D icaron;01D0 icircle;24D8 icircumflex;00EE icyrillic;0456 idblgrave;0209 ideographearthcircle;328F ideographfirecircle;328B ideographicallianceparen;323F ideographiccallparen;323A ideographiccentrecircle;32A5 ideographicclose;3006 ideographiccomma;3001 ideographiccommaleft;FF64 ideographiccongratulationparen;3237 ideographiccorrectcircle;32A3 ideographicearthparen;322F ideographicenterpriseparen;323D ideographicexcellentcircle;329D ideographicfestivalparen;3240 ideographicfinancialcircle;3296 ideographicfinancialparen;3236 ideographicfireparen;322B ideographichaveparen;3232 ideographichighcircle;32A4 ideographiciterationmark;3005 ideographiclaborcircle;3298 ideographiclaborparen;3238 ideographicleftcircle;32A7 
ideographiclowcircle;32A6 ideographicmedicinecircle;32A9 ideographicmetalparen;322E ideographicmoonparen;322A ideographicnameparen;3234 ideographicperiod;3002 ideographicprintcircle;329E ideographicreachparen;3243 ideographicrepresentparen;3239 ideographicresourceparen;323E ideographicrightcircle;32A8 ideographicsecretcircle;3299 ideographicselfparen;3242 ideographicsocietyparen;3233 ideographicspace;3000 ideographicspecialparen;3235 ideographicstockparen;3231 ideographicstudyparen;323B ideographicsunparen;3230 ideographicsuperviseparen;323C ideographicwaterparen;322C ideographicwoodparen;322D ideographiczero;3007 ideographmetalcircle;328E ideographmooncircle;328A ideographnamecircle;3294 ideographsuncircle;3290 ideographwatercircle;328C ideographwoodcircle;328D ideva;0907 idieresis;00EF idieresisacute;1E2F idieresiscyrillic;04E5 idotbelow;1ECB iebrevecyrillic;04D7 iecyrillic;0435 ieungacirclekorean;3275 ieungaparenkorean;3215 ieungcirclekorean;3267 ieungkorean;3147 ieungparenkorean;3207 igrave;00EC igujarati;0A87 igurmukhi;0A07 ihiragana;3044 ihookabove;1EC9 iibengali;0988 iicyrillic;0438 iideva;0908 iigujarati;0A88 iigurmukhi;0A08 iimatragurmukhi;0A40 iinvertedbreve;020B iishortcyrillic;0439 iivowelsignbengali;09C0 iivowelsigndeva;0940 iivowelsigngujarati;0AC0 ij;0133 ikatakana;30A4 ikatakanahalfwidth;FF72 ikorean;3163 ilde;02DC iluyhebrew;05AC imacron;012B imacroncyrillic;04E3 imageorapproximatelyequal;2253 imatragurmukhi;0A3F imonospace;FF49 increment;2206 infinity;221E iniarmenian;056B integral;222B integralbottom;2321 integralbt;2321 integralex;F8F5 integraltop;2320 integraltp;2320 intersection;2229 intisquare;3305 invbullet;25D8 invcircle;25D9 invsmileface;263B iocyrillic;0451 iogonek;012F iota;03B9 iotadieresis;03CA iotadieresistonos;0390 iotalatin;0269 iotatonos;03AF iparen;24A4 irigurmukhi;0A72 ismallhiragana;3043 ismallkatakana;30A3 ismallkatakanahalfwidth;FF68 issharbengali;09FA istroke;0268 isuperior;F6ED iterationhiragana;309D iterationkatakana;30FD itilde;0129 itildebelow;1E2D iubopomofo;3129 iucyrillic;044E ivowelsignbengali;09BF ivowelsigndeva;093F ivowelsigngujarati;0ABF izhitsacyrillic;0475 izhitsadblgravecyrillic;0477 j;006A jaarmenian;0571 jabengali;099C jadeva;091C jagujarati;0A9C jagurmukhi;0A1C jbopomofo;3110 jcaron;01F0 jcircle;24D9 jcircumflex;0135 jcrossedtail;029D jdotlessstroke;025F jecyrillic;0458 jeemarabic;062C jeemfinalarabic;FE9E jeeminitialarabic;FE9F jeemmedialarabic;FEA0 jeharabic;0698 jehfinalarabic;FB8B jhabengali;099D jhadeva;091D jhagujarati;0A9D jhagurmukhi;0A1D jheharmenian;057B jis;3004 jmonospace;FF4A jparen;24A5 jsuperior;02B2 k;006B kabashkircyrillic;04A1 kabengali;0995 kacute;1E31 kacyrillic;043A kadescendercyrillic;049B kadeva;0915 kaf;05DB kafarabic;0643 kafdagesh;FB3B kafdageshhebrew;FB3B kaffinalarabic;FEDA kafhebrew;05DB kafinitialarabic;FEDB kafmedialarabic;FEDC kafrafehebrew;FB4D kagujarati;0A95 kagurmukhi;0A15 kahiragana;304B kahookcyrillic;04C4 kakatakana;30AB kakatakanahalfwidth;FF76 kappa;03BA kappasymbolgreek;03F0 kapyeounmieumkorean;3171 kapyeounphieuphkorean;3184 kapyeounpieupkorean;3178 kapyeounssangpieupkorean;3179 karoriisquare;330D kashidaautoarabic;0640 kashidaautonosidebearingarabic;0640 kasmallkatakana;30F5 kasquare;3384 kasraarabic;0650 kasratanarabic;064D kastrokecyrillic;049F katahiraprolongmarkhalfwidth;FF70 kaverticalstrokecyrillic;049D kbopomofo;310E kcalsquare;3389 kcaron;01E9 kcedilla;0137 kcircle;24DA kcommaaccent;0137 kdotbelow;1E33 keharmenian;0584 kehiragana;3051 kekatakana;30B1 kekatakanahalfwidth;FF79 
kenarmenian;056F kesmallkatakana;30F6 kgreenlandic;0138 khabengali;0996 khacyrillic;0445 khadeva;0916 khagujarati;0A96 khagurmukhi;0A16 khaharabic;062E khahfinalarabic;FEA6 khahinitialarabic;FEA7 khahmedialarabic;FEA8 kheicoptic;03E7 khhadeva;0959 khhagurmukhi;0A59 khieukhacirclekorean;3278 khieukhaparenkorean;3218 khieukhcirclekorean;326A khieukhkorean;314B khieukhparenkorean;320A khokhaithai;0E02 khokhonthai;0E05 khokhuatthai;0E03 khokhwaithai;0E04 khomutthai;0E5B khook;0199 khorakhangthai;0E06 khzsquare;3391 kihiragana;304D kikatakana;30AD kikatakanahalfwidth;FF77 kiroguramusquare;3315 kiromeetorusquare;3316 kirosquare;3314 kiyeokacirclekorean;326E kiyeokaparenkorean;320E kiyeokcirclekorean;3260 kiyeokkorean;3131 kiyeokparenkorean;3200 kiyeoksioskorean;3133 kjecyrillic;045C klinebelow;1E35 klsquare;3398 kmcubedsquare;33A6 kmonospace;FF4B kmsquaredsquare;33A2 kohiragana;3053 kohmsquare;33C0 kokaithai;0E01 kokatakana;30B3 kokatakanahalfwidth;FF7A kooposquare;331E koppacyrillic;0481 koreanstandardsymbol;327F koroniscmb;0343 kparen;24A6 kpasquare;33AA ksicyrillic;046F ktsquare;33CF kturned;029E kuhiragana;304F kukatakana;30AF kukatakanahalfwidth;FF78 kvsquare;33B8 kwsquare;33BE l;006C labengali;09B2 lacute;013A ladeva;0932 lagujarati;0AB2 lagurmukhi;0A32 lakkhangyaothai;0E45 lamaleffinalarabic;FEFC lamalefhamzaabovefinalarabic;FEF8 lamalefhamzaaboveisolatedarabic;FEF7 lamalefhamzabelowfinalarabic;FEFA lamalefhamzabelowisolatedarabic;FEF9 lamalefisolatedarabic;FEFB lamalefmaddaabovefinalarabic;FEF6 lamalefmaddaaboveisolatedarabic;FEF5 lamarabic;0644 lambda;03BB lambdastroke;019B lamed;05DC lameddagesh;FB3C lameddageshhebrew;FB3C lamedhebrew;05DC lamedholam;05DC 05B9 lamedholamdagesh;05DC 05B9 05BC lamedholamdageshhebrew;05DC 05B9 05BC lamedholamhebrew;05DC 05B9 lamfinalarabic;FEDE lamhahinitialarabic;FCCA laminitialarabic;FEDF lamjeeminitialarabic;FCC9 lamkhahinitialarabic;FCCB lamlamhehisolatedarabic;FDF2 lammedialarabic;FEE0 lammeemhahinitialarabic;FD88 lammeeminitialarabic;FCCC lammeemjeeminitialarabic;FEDF FEE4 FEA0 lammeemkhahinitialarabic;FEDF FEE4 FEA8 largecircle;25EF lbar;019A lbelt;026C lbopomofo;310C lcaron;013E lcedilla;013C lcircle;24DB lcircumflexbelow;1E3D lcommaaccent;013C ldot;0140 ldotaccent;0140 ldotbelow;1E37 ldotbelowmacron;1E39 leftangleabovecmb;031A lefttackbelowcmb;0318 less;003C lessequal;2264 lessequalorgreater;22DA lessmonospace;FF1C lessorequivalent;2272 lessorgreater;2276 lessoverequal;2266 lesssmall;FE64 lezh;026E lfblock;258C lhookretroflex;026D lira;20A4 liwnarmenian;056C lj;01C9 ljecyrillic;0459 ll;F6C0 lladeva;0933 llagujarati;0AB3 llinebelow;1E3B llladeva;0934 llvocalicbengali;09E1 llvocalicdeva;0961 llvocalicvowelsignbengali;09E3 llvocalicvowelsigndeva;0963 lmiddletilde;026B lmonospace;FF4C lmsquare;33D0 lochulathai;0E2C logicaland;2227 logicalnot;00AC logicalnotreversed;2310 logicalor;2228 lolingthai;0E25 longs;017F lowlinecenterline;FE4E lowlinecmb;0332 lowlinedashed;FE4D lozenge;25CA lparen;24A7 lslash;0142 lsquare;2113 lsuperior;F6EE ltshade;2591 luthai;0E26 lvocalicbengali;098C lvocalicdeva;090C lvocalicvowelsignbengali;09E2 lvocalicvowelsigndeva;0962 lxsquare;33D3 m;006D mabengali;09AE macron;00AF macronbelowcmb;0331 macroncmb;0304 macronlowmod;02CD macronmonospace;FFE3 macute;1E3F madeva;092E magujarati;0AAE magurmukhi;0A2E mahapakhhebrew;05A4 mahapakhlefthebrew;05A4 mahiragana;307E maichattawalowleftthai;F895 maichattawalowrightthai;F894 maichattawathai;0E4B maichattawaupperleftthai;F893 maieklowleftthai;F88C maieklowrightthai;F88B maiekthai;0E48 
maiekupperleftthai;F88A maihanakatleftthai;F884 maihanakatthai;0E31 maitaikhuleftthai;F889 maitaikhuthai;0E47 maitholowleftthai;F88F maitholowrightthai;F88E maithothai;0E49 maithoupperleftthai;F88D maitrilowleftthai;F892 maitrilowrightthai;F891 maitrithai;0E4A maitriupperleftthai;F890 maiyamokthai;0E46 makatakana;30DE makatakanahalfwidth;FF8F male;2642 mansyonsquare;3347 maqafhebrew;05BE mars;2642 masoracirclehebrew;05AF masquare;3383 mbopomofo;3107 mbsquare;33D4 mcircle;24DC mcubedsquare;33A5 mdotaccent;1E41 mdotbelow;1E43 meemarabic;0645 meemfinalarabic;FEE2 meeminitialarabic;FEE3 meemmedialarabic;FEE4 meemmeeminitialarabic;FCD1 meemmeemisolatedarabic;FC48 meetorusquare;334D mehiragana;3081 meizierasquare;337E mekatakana;30E1 mekatakanahalfwidth;FF92 mem;05DE memdagesh;FB3E memdageshhebrew;FB3E memhebrew;05DE menarmenian;0574 merkhahebrew;05A5 merkhakefulahebrew;05A6 merkhakefulalefthebrew;05A6 merkhalefthebrew;05A5 mhook;0271 mhzsquare;3392 middledotkatakanahalfwidth;FF65 middot;00B7 mieumacirclekorean;3272 mieumaparenkorean;3212 mieumcirclekorean;3264 mieumkorean;3141 mieumpansioskorean;3170 mieumparenkorean;3204 mieumpieupkorean;316E mieumsioskorean;316F mihiragana;307F mikatakana;30DF mikatakanahalfwidth;FF90 minus;2212 minusbelowcmb;0320 minuscircle;2296 minusmod;02D7 minusplus;2213 minute;2032 miribaarusquare;334A mirisquare;3349 mlonglegturned;0270 mlsquare;3396 mmcubedsquare;33A3 mmonospace;FF4D mmsquaredsquare;339F mohiragana;3082 mohmsquare;33C1 mokatakana;30E2 mokatakanahalfwidth;FF93 molsquare;33D6 momathai;0E21 moverssquare;33A7 moverssquaredsquare;33A8 mparen;24A8 mpasquare;33AB mssquare;33B3 msuperior;F6EF mturned;026F mu;00B5 mu1;00B5 muasquare;3382 muchgreater;226B muchless;226A mufsquare;338C mugreek;03BC mugsquare;338D muhiragana;3080 mukatakana;30E0 mukatakanahalfwidth;FF91 mulsquare;3395 multiply;00D7 mumsquare;339B munahhebrew;05A3 munahlefthebrew;05A3 musicalnote;266A musicalnotedbl;266B musicflatsign;266D musicsharpsign;266F mussquare;33B2 muvsquare;33B6 muwsquare;33BC mvmegasquare;33B9 mvsquare;33B7 mwmegasquare;33BF mwsquare;33BD n;006E nabengali;09A8 nabla;2207 nacute;0144 nadeva;0928 nagujarati;0AA8 nagurmukhi;0A28 nahiragana;306A nakatakana;30CA nakatakanahalfwidth;FF85 napostrophe;0149 nasquare;3381 nbopomofo;310B nbspace;00A0 ncaron;0148 ncedilla;0146 ncircle;24DD ncircumflexbelow;1E4B ncommaaccent;0146 ndotaccent;1E45 ndotbelow;1E47 nehiragana;306D nekatakana;30CD nekatakanahalfwidth;FF88 newsheqelsign;20AA nfsquare;338B ngabengali;0999 ngadeva;0919 ngagujarati;0A99 ngagurmukhi;0A19 ngonguthai;0E07 nhiragana;3093 nhookleft;0272 nhookretroflex;0273 nieunacirclekorean;326F nieunaparenkorean;320F nieuncieuckorean;3135 nieuncirclekorean;3261 nieunhieuhkorean;3136 nieunkorean;3134 nieunpansioskorean;3168 nieunparenkorean;3201 nieunsioskorean;3167 nieuntikeutkorean;3166 nihiragana;306B nikatakana;30CB nikatakanahalfwidth;FF86 nikhahitleftthai;F899 nikhahitthai;0E4D nine;0039 ninearabic;0669 ninebengali;09EF ninecircle;2468 ninecircleinversesansserif;2792 ninedeva;096F ninegujarati;0AEF ninegurmukhi;0A6F ninehackarabic;0669 ninehangzhou;3029 nineideographicparen;3228 nineinferior;2089 ninemonospace;FF19 nineoldstyle;F739 nineparen;247C nineperiod;2490 ninepersian;06F9 nineroman;2178 ninesuperior;2079 nineteencircle;2472 nineteenparen;2486 nineteenperiod;249A ninethai;0E59 nj;01CC njecyrillic;045A nkatakana;30F3 nkatakanahalfwidth;FF9D nlegrightlong;019E nlinebelow;1E49 nmonospace;FF4E nmsquare;339A nnabengali;09A3 nnadeva;0923 nnagujarati;0AA3 nnagurmukhi;0A23 
nnnadeva;0929 nohiragana;306E nokatakana;30CE nokatakanahalfwidth;FF89 nonbreakingspace;00A0 nonenthai;0E13 nonuthai;0E19 noonarabic;0646 noonfinalarabic;FEE6 noonghunnaarabic;06BA noonghunnafinalarabic;FB9F noonhehinitialarabic;FEE7 FEEC nooninitialarabic;FEE7 noonjeeminitialarabic;FCD2 noonjeemisolatedarabic;FC4B noonmedialarabic;FEE8 noonmeeminitialarabic;FCD5 noonmeemisolatedarabic;FC4E noonnoonfinalarabic;FC8D notcontains;220C notelement;2209 notelementof;2209 notequal;2260 notgreater;226F notgreaternorequal;2271 notgreaternorless;2279 notidentical;2262 notless;226E notlessnorequal;2270 notparallel;2226 notprecedes;2280 notsubset;2284 notsucceeds;2281 notsuperset;2285 nowarmenian;0576 nparen;24A9 nssquare;33B1 nsuperior;207F ntilde;00F1 nu;03BD nuhiragana;306C nukatakana;30CC nukatakanahalfwidth;FF87 nuktabengali;09BC nuktadeva;093C nuktagujarati;0ABC nuktagurmukhi;0A3C numbersign;0023 numbersignmonospace;FF03 numbersignsmall;FE5F numeralsigngreek;0374 numeralsignlowergreek;0375 numero;2116 nun;05E0 nundagesh;FB40 nundageshhebrew;FB40 nunhebrew;05E0 nvsquare;33B5 nwsquare;33BB nyabengali;099E nyadeva;091E nyagujarati;0A9E nyagurmukhi;0A1E o;006F oacute;00F3 oangthai;0E2D obarred;0275 obarredcyrillic;04E9 obarreddieresiscyrillic;04EB obengali;0993 obopomofo;311B obreve;014F ocandradeva;0911 ocandragujarati;0A91 ocandravowelsigndeva;0949 ocandravowelsigngujarati;0AC9 ocaron;01D2 ocircle;24DE ocircumflex;00F4 ocircumflexacute;1ED1 ocircumflexdotbelow;1ED9 ocircumflexgrave;1ED3 ocircumflexhookabove;1ED5 ocircumflextilde;1ED7 ocyrillic;043E odblacute;0151 odblgrave;020D odeva;0913 odieresis;00F6 odieresiscyrillic;04E7 odotbelow;1ECD oe;0153 oekorean;315A ogonek;02DB ogonekcmb;0328 ograve;00F2 ogujarati;0A93 oharmenian;0585 ohiragana;304A ohookabove;1ECF ohorn;01A1 ohornacute;1EDB ohorndotbelow;1EE3 ohorngrave;1EDD ohornhookabove;1EDF ohorntilde;1EE1 ohungarumlaut;0151 oi;01A3 oinvertedbreve;020F okatakana;30AA okatakanahalfwidth;FF75 okorean;3157 olehebrew;05AB omacron;014D omacronacute;1E53 omacrongrave;1E51 omdeva;0950 omega;03C9 omega1;03D6 omegacyrillic;0461 omegalatinclosed;0277 omegaroundcyrillic;047B omegatitlocyrillic;047D omegatonos;03CE omgujarati;0AD0 omicron;03BF omicrontonos;03CC omonospace;FF4F one;0031 onearabic;0661 onebengali;09E7 onecircle;2460 onecircleinversesansserif;278A onedeva;0967 onedotenleader;2024 oneeighth;215B onefitted;F6DC onegujarati;0AE7 onegurmukhi;0A67 onehackarabic;0661 onehalf;00BD onehangzhou;3021 oneideographicparen;3220 oneinferior;2081 onemonospace;FF11 onenumeratorbengali;09F4 oneoldstyle;F731 oneparen;2474 oneperiod;2488 onepersian;06F1 onequarter;00BC oneroman;2170 onesuperior;00B9 onethai;0E51 onethird;2153 oogonek;01EB oogonekmacron;01ED oogurmukhi;0A13 oomatragurmukhi;0A4B oopen;0254 oparen;24AA openbullet;25E6 option;2325 ordfeminine;00AA ordmasculine;00BA orthogonal;221F oshortdeva;0912 oshortvowelsigndeva;094A oslash;00F8 oslashacute;01FF osmallhiragana;3049 osmallkatakana;30A9 osmallkatakanahalfwidth;FF6B ostrokeacute;01FF osuperior;F6F0 otcyrillic;047F otilde;00F5 otildeacute;1E4D otildedieresis;1E4F oubopomofo;3121 overline;203E overlinecenterline;FE4A overlinecmb;0305 overlinedashed;FE49 overlinedblwavy;FE4C overlinewavy;FE4B overscore;00AF ovowelsignbengali;09CB ovowelsigndeva;094B ovowelsigngujarati;0ACB p;0070 paampssquare;3380 paasentosquare;332B pabengali;09AA pacute;1E55 padeva;092A pagedown;21DF pageup;21DE pagujarati;0AAA pagurmukhi;0A2A pahiragana;3071 paiyannoithai;0E2F pakatakana;30D1 palatalizationcyrilliccmb;0484 
palochkacyrillic;04C0 pansioskorean;317F paragraph;00B6 parallel;2225 parenleft;0028 parenleftaltonearabic;FD3E parenleftbt;F8ED parenleftex;F8EC parenleftinferior;208D parenleftmonospace;FF08 parenleftsmall;FE59 parenleftsuperior;207D parenlefttp;F8EB parenleftvertical;FE35 parenright;0029 parenrightaltonearabic;FD3F parenrightbt;F8F8 parenrightex;F8F7 parenrightinferior;208E parenrightmonospace;FF09 parenrightsmall;FE5A parenrightsuperior;207E parenrighttp;F8F6 parenrightvertical;FE36 partialdiff;2202 paseqhebrew;05C0 pashtahebrew;0599 pasquare;33A9 patah;05B7 patah11;05B7 patah1d;05B7 patah2a;05B7 patahhebrew;05B7 patahnarrowhebrew;05B7 patahquarterhebrew;05B7 patahwidehebrew;05B7 pazerhebrew;05A1 pbopomofo;3106 pcircle;24DF pdotaccent;1E57 pe;05E4 pecyrillic;043F pedagesh;FB44 pedageshhebrew;FB44 peezisquare;333B pefinaldageshhebrew;FB43 peharabic;067E peharmenian;057A pehebrew;05E4 pehfinalarabic;FB57 pehinitialarabic;FB58 pehiragana;307A pehmedialarabic;FB59 pekatakana;30DA pemiddlehookcyrillic;04A7 perafehebrew;FB4E percent;0025 percentarabic;066A percentmonospace;FF05 percentsmall;FE6A period;002E periodarmenian;0589 periodcentered;00B7 periodhalfwidth;FF61 periodinferior;F6E7 periodmonospace;FF0E periodsmall;FE52 periodsuperior;F6E8 perispomenigreekcmb;0342 perpendicular;22A5 perthousand;2030 peseta;20A7 pfsquare;338A phabengali;09AB phadeva;092B phagujarati;0AAB phagurmukhi;0A2B phi;03C6 phi1;03D5 phieuphacirclekorean;327A phieuphaparenkorean;321A phieuphcirclekorean;326C phieuphkorean;314D phieuphparenkorean;320C philatin;0278 phinthuthai;0E3A phisymbolgreek;03D5 phook;01A5 phophanthai;0E1E phophungthai;0E1C phosamphaothai;0E20 pi;03C0 pieupacirclekorean;3273 pieupaparenkorean;3213 pieupcieuckorean;3176 pieupcirclekorean;3265 pieupkiyeokkorean;3172 pieupkorean;3142 pieupparenkorean;3205 pieupsioskiyeokkorean;3174 pieupsioskorean;3144 pieupsiostikeutkorean;3175 pieupthieuthkorean;3177 pieuptikeutkorean;3173 pihiragana;3074 pikatakana;30D4 pisymbolgreek;03D6 piwrarmenian;0583 plus;002B plusbelowcmb;031F pluscircle;2295 plusminus;00B1 plusmod;02D6 plusmonospace;FF0B plussmall;FE62 plussuperior;207A pmonospace;FF50 pmsquare;33D8 pohiragana;307D pointingindexdownwhite;261F pointingindexleftwhite;261C pointingindexrightwhite;261E pointingindexupwhite;261D pokatakana;30DD poplathai;0E1B postalmark;3012 postalmarkface;3020 pparen;24AB precedes;227A prescription;211E primemod;02B9 primereversed;2035 product;220F projective;2305 prolongedkana;30FC propellor;2318 propersubset;2282 propersuperset;2283 proportion;2237 proportional;221D psi;03C8 psicyrillic;0471 psilipneumatacyrilliccmb;0486 pssquare;33B0 puhiragana;3077 pukatakana;30D7 pvsquare;33B4 pwsquare;33BA q;0071 qadeva;0958 qadmahebrew;05A8 qafarabic;0642 qaffinalarabic;FED6 qafinitialarabic;FED7 qafmedialarabic;FED8 qamats;05B8 qamats10;05B8 qamats1a;05B8 qamats1c;05B8 qamats27;05B8 qamats29;05B8 qamats33;05B8 qamatsde;05B8 qamatshebrew;05B8 qamatsnarrowhebrew;05B8 qamatsqatanhebrew;05B8 qamatsqatannarrowhebrew;05B8 qamatsqatanquarterhebrew;05B8 qamatsqatanwidehebrew;05B8 qamatsquarterhebrew;05B8 qamatswidehebrew;05B8 qarneyparahebrew;059F qbopomofo;3111 qcircle;24E0 qhook;02A0 qmonospace;FF51 qof;05E7 qofdagesh;FB47 qofdageshhebrew;FB47 qofhatafpatah;05E7 05B2 qofhatafpatahhebrew;05E7 05B2 qofhatafsegol;05E7 05B1 qofhatafsegolhebrew;05E7 05B1 qofhebrew;05E7 qofhiriq;05E7 05B4 qofhiriqhebrew;05E7 05B4 qofholam;05E7 05B9 qofholamhebrew;05E7 05B9 qofpatah;05E7 05B7 qofpatahhebrew;05E7 05B7 qofqamats;05E7 05B8 qofqamatshebrew;05E7 05B8 
qofqubuts;05E7 05BB qofqubutshebrew;05E7 05BB qofsegol;05E7 05B6 qofsegolhebrew;05E7 05B6 qofsheva;05E7 05B0 qofshevahebrew;05E7 05B0 qoftsere;05E7 05B5 qoftserehebrew;05E7 05B5 qparen;24AC quarternote;2669 qubuts;05BB qubuts18;05BB qubuts25;05BB qubuts31;05BB qubutshebrew;05BB qubutsnarrowhebrew;05BB qubutsquarterhebrew;05BB qubutswidehebrew;05BB question;003F questionarabic;061F questionarmenian;055E questiondown;00BF questiondownsmall;F7BF questiongreek;037E questionmonospace;FF1F questionsmall;F73F quotedbl;0022 quotedblbase;201E quotedblleft;201C quotedblmonospace;FF02 quotedblprime;301E quotedblprimereversed;301D quotedblright;201D quoteleft;2018 quoteleftreversed;201B quotereversed;201B quoteright;2019 quoterightn;0149 quotesinglbase;201A quotesingle;0027 quotesinglemonospace;FF07 r;0072 raarmenian;057C rabengali;09B0 racute;0155 radeva;0930 radical;221A radicalex;F8E5 radoverssquare;33AE radoverssquaredsquare;33AF radsquare;33AD rafe;05BF rafehebrew;05BF ragujarati;0AB0 ragurmukhi;0A30 rahiragana;3089 rakatakana;30E9 rakatakanahalfwidth;FF97 ralowerdiagonalbengali;09F1 ramiddlediagonalbengali;09F0 ramshorn;0264 ratio;2236 rbopomofo;3116 rcaron;0159 rcedilla;0157 rcircle;24E1 rcommaaccent;0157 rdblgrave;0211 rdotaccent;1E59 rdotbelow;1E5B rdotbelowmacron;1E5D referencemark;203B reflexsubset;2286 reflexsuperset;2287 registered;00AE registersans;F8E8 registerserif;F6DA reharabic;0631 reharmenian;0580 rehfinalarabic;FEAE rehiragana;308C rehyehaleflamarabic;0631 FEF3 FE8E 0644 rekatakana;30EC rekatakanahalfwidth;FF9A resh;05E8 reshdageshhebrew;FB48 reshhatafpatah;05E8 05B2 reshhatafpatahhebrew;05E8 05B2 reshhatafsegol;05E8 05B1 reshhatafsegolhebrew;05E8 05B1 reshhebrew;05E8 reshhiriq;05E8 05B4 reshhiriqhebrew;05E8 05B4 reshholam;05E8 05B9 reshholamhebrew;05E8 05B9 reshpatah;05E8 05B7 reshpatahhebrew;05E8 05B7 reshqamats;05E8 05B8 reshqamatshebrew;05E8 05B8 reshqubuts;05E8 05BB reshqubutshebrew;05E8 05BB reshsegol;05E8 05B6 reshsegolhebrew;05E8 05B6 reshsheva;05E8 05B0 reshshevahebrew;05E8 05B0 reshtsere;05E8 05B5 reshtserehebrew;05E8 05B5 reversedtilde;223D reviahebrew;0597 reviamugrashhebrew;0597 revlogicalnot;2310 rfishhook;027E rfishhookreversed;027F rhabengali;09DD rhadeva;095D rho;03C1 rhook;027D rhookturned;027B rhookturnedsuperior;02B5 rhosymbolgreek;03F1 rhotichookmod;02DE rieulacirclekorean;3271 rieulaparenkorean;3211 rieulcirclekorean;3263 rieulhieuhkorean;3140 rieulkiyeokkorean;313A rieulkiyeoksioskorean;3169 rieulkorean;3139 rieulmieumkorean;313B rieulpansioskorean;316C rieulparenkorean;3203 rieulphieuphkorean;313F rieulpieupkorean;313C rieulpieupsioskorean;316B rieulsioskorean;313D rieulthieuthkorean;313E rieultikeutkorean;316A rieulyeorinhieuhkorean;316D rightangle;221F righttackbelowcmb;0319 righttriangle;22BF rihiragana;308A rikatakana;30EA rikatakanahalfwidth;FF98 ring;02DA ringbelowcmb;0325 ringcmb;030A ringhalfleft;02BF ringhalfleftarmenian;0559 ringhalfleftbelowcmb;031C ringhalfleftcentered;02D3 ringhalfright;02BE ringhalfrightbelowcmb;0339 ringhalfrightcentered;02D2 rinvertedbreve;0213 rittorusquare;3351 rlinebelow;1E5F rlongleg;027C rlonglegturned;027A rmonospace;FF52 rohiragana;308D rokatakana;30ED rokatakanahalfwidth;FF9B roruathai;0E23 rparen;24AD rrabengali;09DC rradeva;0931 rragurmukhi;0A5C rreharabic;0691 rrehfinalarabic;FB8D rrvocalicbengali;09E0 rrvocalicdeva;0960 rrvocalicgujarati;0AE0 rrvocalicvowelsignbengali;09C4 rrvocalicvowelsigndeva;0944 rrvocalicvowelsigngujarati;0AC4 rsuperior;F6F1 rtblock;2590 rturned;0279 rturnedsuperior;02B4 ruhiragana;308B 
rukatakana;30EB rukatakanahalfwidth;FF99 rupeemarkbengali;09F2 rupeesignbengali;09F3 rupiah;F6DD ruthai;0E24 rvocalicbengali;098B rvocalicdeva;090B rvocalicgujarati;0A8B rvocalicvowelsignbengali;09C3 rvocalicvowelsigndeva;0943 rvocalicvowelsigngujarati;0AC3 s;0073 sabengali;09B8 sacute;015B sacutedotaccent;1E65 sadarabic;0635 sadeva;0938 sadfinalarabic;FEBA sadinitialarabic;FEBB sadmedialarabic;FEBC sagujarati;0AB8 sagurmukhi;0A38 sahiragana;3055 sakatakana;30B5 sakatakanahalfwidth;FF7B sallallahoualayhewasallamarabic;FDFA samekh;05E1 samekhdagesh;FB41 samekhdageshhebrew;FB41 samekhhebrew;05E1 saraaathai;0E32 saraaethai;0E41 saraaimaimalaithai;0E44 saraaimaimuanthai;0E43 saraamthai;0E33 saraathai;0E30 saraethai;0E40 saraiileftthai;F886 saraiithai;0E35 saraileftthai;F885 saraithai;0E34 saraothai;0E42 saraueeleftthai;F888 saraueethai;0E37 saraueleftthai;F887 sarauethai;0E36 sarauthai;0E38 sarauuthai;0E39 sbopomofo;3119 scaron;0161 scarondotaccent;1E67 scedilla;015F schwa;0259 schwacyrillic;04D9 schwadieresiscyrillic;04DB schwahook;025A scircle;24E2 scircumflex;015D scommaaccent;0219 sdotaccent;1E61 sdotbelow;1E63 sdotbelowdotaccent;1E69 seagullbelowcmb;033C second;2033 secondtonechinese;02CA section;00A7 seenarabic;0633 seenfinalarabic;FEB2 seeninitialarabic;FEB3 seenmedialarabic;FEB4 segol;05B6 segol13;05B6 segol1f;05B6 segol2c;05B6 segolhebrew;05B6 segolnarrowhebrew;05B6 segolquarterhebrew;05B6 segoltahebrew;0592 segolwidehebrew;05B6 seharmenian;057D sehiragana;305B sekatakana;30BB sekatakanahalfwidth;FF7E semicolon;003B semicolonarabic;061B semicolonmonospace;FF1B semicolonsmall;FE54 semivoicedmarkkana;309C semivoicedmarkkanahalfwidth;FF9F sentisquare;3322 sentosquare;3323 seven;0037 sevenarabic;0667 sevenbengali;09ED sevencircle;2466 sevencircleinversesansserif;2790 sevendeva;096D seveneighths;215E sevengujarati;0AED sevengurmukhi;0A6D sevenhackarabic;0667 sevenhangzhou;3027 sevenideographicparen;3226 seveninferior;2087 sevenmonospace;FF17 sevenoldstyle;F737 sevenparen;247A sevenperiod;248E sevenpersian;06F7 sevenroman;2176 sevensuperior;2077 seventeencircle;2470 seventeenparen;2484 seventeenperiod;2498 seventhai;0E57 sfthyphen;00AD shaarmenian;0577 shabengali;09B6 shacyrillic;0448 shaddaarabic;0651 shaddadammaarabic;FC61 shaddadammatanarabic;FC5E shaddafathaarabic;FC60 shaddafathatanarabic;0651 064B shaddakasraarabic;FC62 shaddakasratanarabic;FC5F shade;2592 shadedark;2593 shadelight;2591 shademedium;2592 shadeva;0936 shagujarati;0AB6 shagurmukhi;0A36 shalshelethebrew;0593 shbopomofo;3115 shchacyrillic;0449 sheenarabic;0634 sheenfinalarabic;FEB6 sheeninitialarabic;FEB7 sheenmedialarabic;FEB8 sheicoptic;03E3 sheqel;20AA sheqelhebrew;20AA sheva;05B0 sheva115;05B0 sheva15;05B0 sheva22;05B0 sheva2e;05B0 shevahebrew;05B0 shevanarrowhebrew;05B0 shevaquarterhebrew;05B0 shevawidehebrew;05B0 shhacyrillic;04BB shimacoptic;03ED shin;05E9 shindagesh;FB49 shindageshhebrew;FB49 shindageshshindot;FB2C shindageshshindothebrew;FB2C shindageshsindot;FB2D shindageshsindothebrew;FB2D shindothebrew;05C1 shinhebrew;05E9 shinshindot;FB2A shinshindothebrew;FB2A shinsindot;FB2B shinsindothebrew;FB2B shook;0282 sigma;03C3 sigma1;03C2 sigmafinal;03C2 sigmalunatesymbolgreek;03F2 sihiragana;3057 sikatakana;30B7 sikatakanahalfwidth;FF7C siluqhebrew;05BD siluqlefthebrew;05BD similar;223C sindothebrew;05C2 siosacirclekorean;3274 siosaparenkorean;3214 sioscieuckorean;317E sioscirclekorean;3266 sioskiyeokkorean;317A sioskorean;3145 siosnieunkorean;317B siosparenkorean;3206 siospieupkorean;317D siostikeutkorean;317C 
six;0036 sixarabic;0666 sixbengali;09EC sixcircle;2465 sixcircleinversesansserif;278F sixdeva;096C sixgujarati;0AEC sixgurmukhi;0A6C sixhackarabic;0666 sixhangzhou;3026 sixideographicparen;3225 sixinferior;2086 sixmonospace;FF16 sixoldstyle;F736 sixparen;2479 sixperiod;248D sixpersian;06F6 sixroman;2175 sixsuperior;2076 sixteencircle;246F sixteencurrencydenominatorbengali;09F9 sixteenparen;2483 sixteenperiod;2497 sixthai;0E56 slash;002F slashmonospace;FF0F slong;017F slongdotaccent;1E9B smileface;263A smonospace;FF53 sofpasuqhebrew;05C3 softhyphen;00AD softsigncyrillic;044C sohiragana;305D sokatakana;30BD sokatakanahalfwidth;FF7F soliduslongoverlaycmb;0338 solidusshortoverlaycmb;0337 sorusithai;0E29 sosalathai;0E28 sosothai;0E0B sosuathai;0E2A space;0020 spacehackarabic;0020 spade;2660 spadesuitblack;2660 spadesuitwhite;2664 sparen;24AE squarebelowcmb;033B squarecc;33C4 squarecm;339D squarediagonalcrosshatchfill;25A9 squarehorizontalfill;25A4 squarekg;338F squarekm;339E squarekmcapital;33CE squareln;33D1 squarelog;33D2 squaremg;338E squaremil;33D5 squaremm;339C squaremsquared;33A1 squareorthogonalcrosshatchfill;25A6 squareupperlefttolowerrightfill;25A7 squareupperrighttolowerleftfill;25A8 squareverticalfill;25A5 squarewhitewithsmallblack;25A3 srsquare;33DB ssabengali;09B7 ssadeva;0937 ssagujarati;0AB7 ssangcieuckorean;3149 ssanghieuhkorean;3185 ssangieungkorean;3180 ssangkiyeokkorean;3132 ssangnieunkorean;3165 ssangpieupkorean;3143 ssangsioskorean;3146 ssangtikeutkorean;3138 ssuperior;F6F2 sterling;00A3 sterlingmonospace;FFE1 strokelongoverlaycmb;0336 strokeshortoverlaycmb;0335 subset;2282 subsetnotequal;228A subsetorequal;2286 succeeds;227B suchthat;220B suhiragana;3059 sukatakana;30B9 sukatakanahalfwidth;FF7D sukunarabic;0652 summation;2211 sun;263C superset;2283 supersetnotequal;228B supersetorequal;2287 svsquare;33DC syouwaerasquare;337C t;0074 tabengali;09A4 tackdown;22A4 tackleft;22A3 tadeva;0924 tagujarati;0AA4 tagurmukhi;0A24 taharabic;0637 tahfinalarabic;FEC2 tahinitialarabic;FEC3 tahiragana;305F tahmedialarabic;FEC4 taisyouerasquare;337D takatakana;30BF takatakanahalfwidth;FF80 tatweelarabic;0640 tau;03C4 tav;05EA tavdages;FB4A tavdagesh;FB4A tavdageshhebrew;FB4A tavhebrew;05EA tbar;0167 tbopomofo;310A tcaron;0165 tccurl;02A8 tcedilla;0163 tcheharabic;0686 tchehfinalarabic;FB7B tchehinitialarabic;FB7C tchehmedialarabic;FB7D tchehmeeminitialarabic;FB7C FEE4 tcircle;24E3 tcircumflexbelow;1E71 tcommaaccent;0163 tdieresis;1E97 tdotaccent;1E6B tdotbelow;1E6D tecyrillic;0442 tedescendercyrillic;04AD teharabic;062A tehfinalarabic;FE96 tehhahinitialarabic;FCA2 tehhahisolatedarabic;FC0C tehinitialarabic;FE97 tehiragana;3066 tehjeeminitialarabic;FCA1 tehjeemisolatedarabic;FC0B tehmarbutaarabic;0629 tehmarbutafinalarabic;FE94 tehmedialarabic;FE98 tehmeeminitialarabic;FCA4 tehmeemisolatedarabic;FC0E tehnoonfinalarabic;FC73 tekatakana;30C6 tekatakanahalfwidth;FF83 telephone;2121 telephoneblack;260E telishagedolahebrew;05A0 telishaqetanahebrew;05A9 tencircle;2469 tenideographicparen;3229 tenparen;247D tenperiod;2491 tenroman;2179 tesh;02A7 tet;05D8 tetdagesh;FB38 tetdageshhebrew;FB38 tethebrew;05D8 tetsecyrillic;04B5 tevirhebrew;059B tevirlefthebrew;059B thabengali;09A5 thadeva;0925 thagujarati;0AA5 thagurmukhi;0A25 thalarabic;0630 thalfinalarabic;FEAC thanthakhatlowleftthai;F898 thanthakhatlowrightthai;F897 thanthakhatthai;0E4C thanthakhatupperleftthai;F896 theharabic;062B thehfinalarabic;FE9A thehinitialarabic;FE9B thehmedialarabic;FE9C thereexists;2203 therefore;2234 theta;03B8 theta1;03D1 
thetasymbolgreek;03D1 thieuthacirclekorean;3279 thieuthaparenkorean;3219 thieuthcirclekorean;326B thieuthkorean;314C thieuthparenkorean;320B thirteencircle;246C thirteenparen;2480 thirteenperiod;2494 thonangmonthothai;0E11 thook;01AD thophuthaothai;0E12 thorn;00FE thothahanthai;0E17 thothanthai;0E10 thothongthai;0E18 thothungthai;0E16 thousandcyrillic;0482 thousandsseparatorarabic;066C thousandsseparatorpersian;066C three;0033 threearabic;0663 threebengali;09E9 threecircle;2462 threecircleinversesansserif;278C threedeva;0969 threeeighths;215C threegujarati;0AE9 threegurmukhi;0A69 threehackarabic;0663 threehangzhou;3023 threeideographicparen;3222 threeinferior;2083 threemonospace;FF13 threenumeratorbengali;09F6 threeoldstyle;F733 threeparen;2476 threeperiod;248A threepersian;06F3 threequarters;00BE threequartersemdash;F6DE threeroman;2172 threesuperior;00B3 threethai;0E53 thzsquare;3394 tihiragana;3061 tikatakana;30C1 tikatakanahalfwidth;FF81 tikeutacirclekorean;3270 tikeutaparenkorean;3210 tikeutcirclekorean;3262 tikeutkorean;3137 tikeutparenkorean;3202 tilde;02DC tildebelowcmb;0330 tildecmb;0303 tildecomb;0303 tildedoublecmb;0360 tildeoperator;223C tildeoverlaycmb;0334 tildeverticalcmb;033E timescircle;2297 tipehahebrew;0596 tipehalefthebrew;0596 tippigurmukhi;0A70 titlocyrilliccmb;0483 tiwnarmenian;057F tlinebelow;1E6F tmonospace;FF54 toarmenian;0569 tohiragana;3068 tokatakana;30C8 tokatakanahalfwidth;FF84 tonebarextrahighmod;02E5 tonebarextralowmod;02E9 tonebarhighmod;02E6 tonebarlowmod;02E8 tonebarmidmod;02E7 tonefive;01BD tonesix;0185 tonetwo;01A8 tonos;0384 tonsquare;3327 topatakthai;0E0F tortoiseshellbracketleft;3014 tortoiseshellbracketleftsmall;FE5D tortoiseshellbracketleftvertical;FE39 tortoiseshellbracketright;3015 tortoiseshellbracketrightsmall;FE5E tortoiseshellbracketrightvertical;FE3A totaothai;0E15 tpalatalhook;01AB tparen;24AF trademark;2122 trademarksans;F8EA trademarkserif;F6DB tretroflexhook;0288 triagdn;25BC triaglf;25C4 triagrt;25BA triagup;25B2 ts;02A6 tsadi;05E6 tsadidagesh;FB46 tsadidageshhebrew;FB46 tsadihebrew;05E6 tsecyrillic;0446 tsere;05B5 tsere12;05B5 tsere1e;05B5 tsere2b;05B5 tserehebrew;05B5 tserenarrowhebrew;05B5 tserequarterhebrew;05B5 tserewidehebrew;05B5 tshecyrillic;045B tsuperior;F6F3 ttabengali;099F ttadeva;091F ttagujarati;0A9F ttagurmukhi;0A1F tteharabic;0679 ttehfinalarabic;FB67 ttehinitialarabic;FB68 ttehmedialarabic;FB69 tthabengali;09A0 tthadeva;0920 tthagujarati;0AA0 tthagurmukhi;0A20 tturned;0287 tuhiragana;3064 tukatakana;30C4 tukatakanahalfwidth;FF82 tusmallhiragana;3063 tusmallkatakana;30C3 tusmallkatakanahalfwidth;FF6F twelvecircle;246B twelveparen;247F twelveperiod;2493 twelveroman;217B twentycircle;2473 twentyhangzhou;5344 twentyparen;2487 twentyperiod;249B two;0032 twoarabic;0662 twobengali;09E8 twocircle;2461 twocircleinversesansserif;278B twodeva;0968 twodotenleader;2025 twodotleader;2025 twodotleadervertical;FE30 twogujarati;0AE8 twogurmukhi;0A68 twohackarabic;0662 twohangzhou;3022 twoideographicparen;3221 twoinferior;2082 twomonospace;FF12 twonumeratorbengali;09F5 twooldstyle;F732 twoparen;2475 twoperiod;2489 twopersian;06F2 tworoman;2171 twostroke;01BB twosuperior;00B2 twothai;0E52 twothirds;2154 u;0075 uacute;00FA ubar;0289 ubengali;0989 ubopomofo;3128 ubreve;016D ucaron;01D4 ucircle;24E4 ucircumflex;00FB ucircumflexbelow;1E77 ucyrillic;0443 udattadeva;0951 udblacute;0171 udblgrave;0215 udeva;0909 udieresis;00FC udieresisacute;01D8 udieresisbelow;1E73 udieresiscaron;01DA udieresiscyrillic;04F1 udieresisgrave;01DC 
udieresismacron;01D6 udotbelow;1EE5 ugrave;00F9 ugujarati;0A89 ugurmukhi;0A09 uhiragana;3046 uhookabove;1EE7 uhorn;01B0 uhornacute;1EE9 uhorndotbelow;1EF1 uhorngrave;1EEB uhornhookabove;1EED uhorntilde;1EEF uhungarumlaut;0171 uhungarumlautcyrillic;04F3 uinvertedbreve;0217 ukatakana;30A6 ukatakanahalfwidth;FF73 ukcyrillic;0479 ukorean;315C umacron;016B umacroncyrillic;04EF umacrondieresis;1E7B umatragurmukhi;0A41 umonospace;FF55 underscore;005F underscoredbl;2017 underscoremonospace;FF3F underscorevertical;FE33 underscorewavy;FE4F union;222A universal;2200 uogonek;0173 uparen;24B0 upblock;2580 upperdothebrew;05C4 upsilon;03C5 upsilondieresis;03CB upsilondieresistonos;03B0 upsilonlatin;028A upsilontonos;03CD uptackbelowcmb;031D uptackmod;02D4 uragurmukhi;0A73 uring;016F ushortcyrillic;045E usmallhiragana;3045 usmallkatakana;30A5 usmallkatakanahalfwidth;FF69 ustraightcyrillic;04AF ustraightstrokecyrillic;04B1 utilde;0169 utildeacute;1E79 utildebelow;1E75 uubengali;098A uudeva;090A uugujarati;0A8A uugurmukhi;0A0A uumatragurmukhi;0A42 uuvowelsignbengali;09C2 uuvowelsigndeva;0942 uuvowelsigngujarati;0AC2 uvowelsignbengali;09C1 uvowelsigndeva;0941 uvowelsigngujarati;0AC1 v;0076 vadeva;0935 vagujarati;0AB5 vagurmukhi;0A35 vakatakana;30F7 vav;05D5 vavdagesh;FB35 vavdagesh65;FB35 vavdageshhebrew;FB35 vavhebrew;05D5 vavholam;FB4B vavholamhebrew;FB4B vavvavhebrew;05F0 vavyodhebrew;05F1 vcircle;24E5 vdotbelow;1E7F vecyrillic;0432 veharabic;06A4 vehfinalarabic;FB6B vehinitialarabic;FB6C vehmedialarabic;FB6D vekatakana;30F9 venus;2640 verticalbar;007C verticallineabovecmb;030D verticallinebelowcmb;0329 verticallinelowmod;02CC verticallinemod;02C8 vewarmenian;057E vhook;028B vikatakana;30F8 viramabengali;09CD viramadeva;094D viramagujarati;0ACD visargabengali;0983 visargadeva;0903 visargagujarati;0A83 vmonospace;FF56 voarmenian;0578 voicediterationhiragana;309E voicediterationkatakana;30FE voicedmarkkana;309B voicedmarkkanahalfwidth;FF9E vokatakana;30FA vparen;24B1 vtilde;1E7D vturned;028C vuhiragana;3094 vukatakana;30F4 w;0077 wacute;1E83 waekorean;3159 wahiragana;308F wakatakana;30EF wakatakanahalfwidth;FF9C wakorean;3158 wasmallhiragana;308E wasmallkatakana;30EE wattosquare;3357 wavedash;301C wavyunderscorevertical;FE34 wawarabic;0648 wawfinalarabic;FEEE wawhamzaabovearabic;0624 wawhamzaabovefinalarabic;FE86 wbsquare;33DD wcircle;24E6 wcircumflex;0175 wdieresis;1E85 wdotaccent;1E87 wdotbelow;1E89 wehiragana;3091 weierstrass;2118 wekatakana;30F1 wekorean;315E weokorean;315D wgrave;1E81 whitebullet;25E6 whitecircle;25CB whitecircleinverse;25D9 whitecornerbracketleft;300E whitecornerbracketleftvertical;FE43 whitecornerbracketright;300F whitecornerbracketrightvertical;FE44 whitediamond;25C7 whitediamondcontainingblacksmalldiamond;25C8 whitedownpointingsmalltriangle;25BF whitedownpointingtriangle;25BD whiteleftpointingsmalltriangle;25C3 whiteleftpointingtriangle;25C1 whitelenticularbracketleft;3016 whitelenticularbracketright;3017 whiterightpointingsmalltriangle;25B9 whiterightpointingtriangle;25B7 whitesmallsquare;25AB whitesmilingface;263A whitesquare;25A1 whitestar;2606 whitetelephone;260F whitetortoiseshellbracketleft;3018 whitetortoiseshellbracketright;3019 whiteuppointingsmalltriangle;25B5 whiteuppointingtriangle;25B3 wihiragana;3090 wikatakana;30F0 wikorean;315F wmonospace;FF57 wohiragana;3092 wokatakana;30F2 wokatakanahalfwidth;FF66 won;20A9 wonmonospace;FFE6 wowaenthai;0E27 wparen;24B2 wring;1E98 wsuperior;02B7 wturned;028D wynn;01BF x;0078 xabovecmb;033D xbopomofo;3112 xcircle;24E7 xdieresis;1E8D 
xdotaccent;1E8B xeharmenian;056D xi;03BE xmonospace;FF58 xparen;24B3 xsuperior;02E3 y;0079 yaadosquare;334E yabengali;09AF yacute;00FD yadeva;092F yaekorean;3152 yagujarati;0AAF yagurmukhi;0A2F yahiragana;3084 yakatakana;30E4 yakatakanahalfwidth;FF94 yakorean;3151 yamakkanthai;0E4E yasmallhiragana;3083 yasmallkatakana;30E3 yasmallkatakanahalfwidth;FF6C yatcyrillic;0463 ycircle;24E8 ycircumflex;0177 ydieresis;00FF ydotaccent;1E8F ydotbelow;1EF5 yeharabic;064A yehbarreearabic;06D2 yehbarreefinalarabic;FBAF yehfinalarabic;FEF2 yehhamzaabovearabic;0626 yehhamzaabovefinalarabic;FE8A yehhamzaaboveinitialarabic;FE8B yehhamzaabovemedialarabic;FE8C yehinitialarabic;FEF3 yehmedialarabic;FEF4 yehmeeminitialarabic;FCDD yehmeemisolatedarabic;FC58 yehnoonfinalarabic;FC94 yehthreedotsbelowarabic;06D1 yekorean;3156 yen;00A5 yenmonospace;FFE5 yeokorean;3155 yeorinhieuhkorean;3186 yerahbenyomohebrew;05AA yerahbenyomolefthebrew;05AA yericyrillic;044B yerudieresiscyrillic;04F9 yesieungkorean;3181 yesieungpansioskorean;3183 yesieungsioskorean;3182 yetivhebrew;059A ygrave;1EF3 yhook;01B4 yhookabove;1EF7 yiarmenian;0575 yicyrillic;0457 yikorean;3162 yinyang;262F yiwnarmenian;0582 ymonospace;FF59 yod;05D9 yoddagesh;FB39 yoddageshhebrew;FB39 yodhebrew;05D9 yodyodhebrew;05F2 yodyodpatahhebrew;FB1F yohiragana;3088 yoikorean;3189 yokatakana;30E8 yokatakanahalfwidth;FF96 yokorean;315B yosmallhiragana;3087 yosmallkatakana;30E7 yosmallkatakanahalfwidth;FF6E yotgreek;03F3 yoyaekorean;3188 yoyakorean;3187 yoyakthai;0E22 yoyingthai;0E0D yparen;24B4 ypogegrammeni;037A ypogegrammenigreekcmb;0345 yr;01A6 yring;1E99 ysuperior;02B8 ytilde;1EF9 yturned;028E yuhiragana;3086 yuikorean;318C yukatakana;30E6 yukatakanahalfwidth;FF95 yukorean;3160 yusbigcyrillic;046B yusbigiotifiedcyrillic;046D yuslittlecyrillic;0467 yuslittleiotifiedcyrillic;0469 yusmallhiragana;3085 yusmallkatakana;30E5 yusmallkatakanahalfwidth;FF6D yuyekorean;318B yuyeokorean;318A yyabengali;09DF yyadeva;095F z;007A zaarmenian;0566 zacute;017A zadeva;095B zagurmukhi;0A5B zaharabic;0638 zahfinalarabic;FEC6 zahinitialarabic;FEC7 zahiragana;3056 zahmedialarabic;FEC8 zainarabic;0632 zainfinalarabic;FEB0 zakatakana;30B6 zaqefgadolhebrew;0595 zaqefqatanhebrew;0594 zarqahebrew;0598 zayin;05D6 zayindagesh;FB36 zayindageshhebrew;FB36 zayinhebrew;05D6 zbopomofo;3117 zcaron;017E zcircle;24E9 zcircumflex;1E91 zcurl;0291 zdot;017C zdotaccent;017C zdotbelow;1E93 zecyrillic;0437 zedescendercyrillic;0499 zedieresiscyrillic;04DF zehiragana;305C zekatakana;30BC zero;0030 zeroarabic;0660 zerobengali;09E6 zerodeva;0966 zerogujarati;0AE6 zerogurmukhi;0A66 zerohackarabic;0660 zeroinferior;2080 zeromonospace;FF10 zerooldstyle;F730 zeropersian;06F0 zerosuperior;2070 zerothai;0E50 zerowidthjoiner;FEFF zerowidthnonjoiner;200C zerowidthspace;200B zeta;03B6 zhbopomofo;3113 zhearmenian;056A zhebrevecyrillic;04C2 zhecyrillic;0436 zhedescendercyrillic;0497 zhedieresiscyrillic;04DD zihiragana;3058 zikatakana;30B8 zinorhebrew;05AE zlinebelow;1E95 zmonospace;FF5A zohiragana;305E zokatakana;30BE zparen;24B5 zretroflexhook;0290 zstroke;01B6 zuhiragana;305A zukatakana;30BA # END """ _aglfnText = """\ # ----------------------------------------------------------- # Copyright 2002-2019 Adobe (http://www.adobe.com/). # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the # following conditions are met: # # Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. 
# # Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # Neither the name of Adobe nor the names of its contributors # may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND # CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------- # Name: Adobe Glyph List For New Fonts # Table version: 1.7 # Date: November 6, 2008 # URL: https://github.com/adobe-type-tools/agl-aglfn # # Description: # # AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph # names that are recommended for new fonts, which are compatible with # the AGL (Adobe Glyph List) Specification, and which should be used # as described in Section 6 of that document. AGLFN comprises the set # of glyph names from AGL that map via the AGL Specification rules to # the semantically correct UV (Unicode Value). For example, "Asmall" # is omitted because AGL maps this glyph name to the PUA (Private Use # Area) value U+F761, rather than to the UV that maps from the glyph # name "A." Also omitted is "ffi," because AGL maps this to the # Alphabetic Presentation Forms value U+FB03, rather than decomposing # it into the following sequence of three UVs: U+0066, U+0066, and # U+0069. The name "arrowvertex" has been omitted because this glyph # now has a real UV, and AGL is now incorrect in mapping it to the PUA # value U+F8E6. If you do not find an appropriate name for your glyph # in this list, then please refer to Section 6 of the AGL # Specification. # # Format: three semicolon-delimited fields: # (1) Standard UV or CUS UV--four uppercase hexadecimal digits # (2) Glyph name--upper/lowercase letters and digits # (3) Character names: Unicode character names for standard UVs, and # descriptive names for CUS UVs--uppercase letters, hyphen, and # space # # The records are sorted by glyph name in increasing ASCII order, # entries with the same glyph name are sorted in decreasing priority # order, the UVs and Unicode character names are provided for # convenience, lines starting with "#" are comments, and blank lines # should be ignored. # # Revision History: # # 1.7 [6 November 2008] # - Reverted to the original 1.4 and earlier mappings for Delta, # Omega, and mu. # - Removed mappings for "afii" names. These should now be assigned # "uni" names. # - Removed mappings for "commaaccent" names. These should now be # assigned "uni" names. # # 1.6 [30 January 2006] # - Completed work intended in 1.5. # # 1.5 [23 November 2005] # - Removed duplicated block at end of file. 
# - Changed mappings: # 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA # 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA # 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU # - Corrected statement above about why "ffi" is omitted. # # 1.4 [24 September 2003] # - Changed version to 1.4, to avoid confusion with the AGL 1.3. # - Fixed spelling errors in the header. # - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode # value in some fonts. # # 1.1 [17 April 2003] # - Renamed [Tt]cedilla back to [Tt]commaaccent. # # 1.0 [31 January 2003] # - Original version. # - Derived from the AGLv1.2 by: # removing the PUA area codes; # removing duplicate Unicode mappings; and # renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla" # 0041;A;LATIN CAPITAL LETTER A 00C6;AE;LATIN CAPITAL LETTER AE 01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE 00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE 0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE 00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX 00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS 00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE 0391;Alpha;GREEK CAPITAL LETTER ALPHA 0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS 0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON 0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK 00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE 01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE 00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE 0042;B;LATIN CAPITAL LETTER B 0392;Beta;GREEK CAPITAL LETTER BETA 0043;C;LATIN CAPITAL LETTER C 0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE 010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON 00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA 0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX 010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE 03A7;Chi;GREEK CAPITAL LETTER CHI 0044;D;LATIN CAPITAL LETTER D 010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON 0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE 2206;Delta;INCREMENT 0045;E;LATIN CAPITAL LETTER E 00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE 0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE 011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON 00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX 00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS 0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE 00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE 0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON 014A;Eng;LATIN CAPITAL LETTER ENG 0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK 0395;Epsilon;GREEK CAPITAL LETTER EPSILON 0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS 0397;Eta;GREEK CAPITAL LETTER ETA 0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS 00D0;Eth;LATIN CAPITAL LETTER ETH 20AC;Euro;EURO SIGN 0046;F;LATIN CAPITAL LETTER F 0047;G;LATIN CAPITAL LETTER G 0393;Gamma;GREEK CAPITAL LETTER GAMMA 011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE 01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON 011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX 0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE 0048;H;LATIN CAPITAL LETTER H 25CF;H18533;BLACK CIRCLE 25AA;H18543;BLACK SMALL SQUARE 25AB;H18551;WHITE SMALL SQUARE 25A1;H22073;WHITE SQUARE 0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE 0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX 0049;I;LATIN CAPITAL LETTER I 0132;IJ;LATIN CAPITAL LIGATURE IJ 00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE 012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE 00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX 
00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS 0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE 2111;Ifraktur;BLACK-LETTER CAPITAL I 00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE 012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON 012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK 0399;Iota;GREEK CAPITAL LETTER IOTA 03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA 038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS 0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE 004A;J;LATIN CAPITAL LETTER J 0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX 004B;K;LATIN CAPITAL LETTER K 039A;Kappa;GREEK CAPITAL LETTER KAPPA 004C;L;LATIN CAPITAL LETTER L 0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE 039B;Lambda;GREEK CAPITAL LETTER LAMDA 013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON 013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT 0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE 004D;M;LATIN CAPITAL LETTER M 039C;Mu;GREEK CAPITAL LETTER MU 004E;N;LATIN CAPITAL LETTER N 0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE 0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON 00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE 039D;Nu;GREEK CAPITAL LETTER NU 004F;O;LATIN CAPITAL LETTER O 0152;OE;LATIN CAPITAL LIGATURE OE 00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE 014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE 00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX 00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS 00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE 01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN 0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE 014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON 2126;Omega;OHM SIGN 038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS 039F;Omicron;GREEK CAPITAL LETTER OMICRON 038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS 00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE 01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE 00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE 0050;P;LATIN CAPITAL LETTER P 03A6;Phi;GREEK CAPITAL LETTER PHI 03A0;Pi;GREEK CAPITAL LETTER PI 03A8;Psi;GREEK CAPITAL LETTER PSI 0051;Q;LATIN CAPITAL LETTER Q 0052;R;LATIN CAPITAL LETTER R 0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE 0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON 211C;Rfraktur;BLACK-LETTER CAPITAL R 03A1;Rho;GREEK CAPITAL LETTER RHO 0053;S;LATIN CAPITAL LETTER S 250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT 2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT 2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT 2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT 253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL 251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT 2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT 2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL 2502;SF110000;BOX DRAWINGS LIGHT VERTICAL 2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT 2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL 2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT 255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT 255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT 
2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT 2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL 2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL 256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE 0160;Scaron;LATIN CAPITAL LETTER S WITH CARON 015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA 015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX 03A3;Sigma;GREEK CAPITAL LETTER SIGMA 0054;T;LATIN CAPITAL LETTER T 03A4;Tau;GREEK CAPITAL LETTER TAU 0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE 0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON 0398;Theta;GREEK CAPITAL LETTER THETA 00DE;Thorn;LATIN CAPITAL LETTER THORN 0055;U;LATIN CAPITAL LETTER U 00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE 016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE 00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX 00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS 00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE 01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN 0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE 016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON 0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK 03A5;Upsilon;GREEK CAPITAL LETTER UPSILON 03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL 03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA 038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS 016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE 0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE 0056;V;LATIN CAPITAL LETTER V 0057;W;LATIN CAPITAL LETTER W 1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE 0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX 1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS 1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE 0058;X;LATIN CAPITAL LETTER X 039E;Xi;GREEK CAPITAL LETTER XI 0059;Y;LATIN CAPITAL LETTER Y 00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE 0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX 0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS 1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE 005A;Z;LATIN CAPITAL LETTER Z 0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE 017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON 017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE 0396;Zeta;GREEK CAPITAL LETTER ZETA 0061;a;LATIN SMALL LETTER A 00E1;aacute;LATIN SMALL LETTER A WITH ACUTE 0103;abreve;LATIN SMALL LETTER A WITH BREVE 00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX 00B4;acute;ACUTE ACCENT 0301;acutecomb;COMBINING ACUTE ACCENT 00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS 00E6;ae;LATIN SMALL LETTER AE 01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE 00E0;agrave;LATIN SMALL LETTER A WITH GRAVE 2135;aleph;ALEF SYMBOL 03B1;alpha;GREEK SMALL LETTER ALPHA 03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS 0101;amacron;LATIN SMALL LETTER A WITH MACRON 0026;ampersand;AMPERSAND 2220;angle;ANGLE 2329;angleleft;LEFT-POINTING ANGLE 
BRACKET 232A;angleright;RIGHT-POINTING ANGLE BRACKET 0387;anoteleia;GREEK ANO TELEIA 0105;aogonek;LATIN SMALL LETTER A WITH OGONEK 2248;approxequal;ALMOST EQUAL TO 00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE 01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE 2194;arrowboth;LEFT RIGHT ARROW 21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW 21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW 21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW 21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW 21D1;arrowdblup;UPWARDS DOUBLE ARROW 2193;arrowdown;DOWNWARDS ARROW 2190;arrowleft;LEFTWARDS ARROW 2192;arrowright;RIGHTWARDS ARROW 2191;arrowup;UPWARDS ARROW 2195;arrowupdn;UP DOWN ARROW 21A8;arrowupdnbse;UP DOWN ARROW WITH BASE 005E;asciicircum;CIRCUMFLEX ACCENT 007E;asciitilde;TILDE 002A;asterisk;ASTERISK 2217;asteriskmath;ASTERISK OPERATOR 0040;at;COMMERCIAL AT 00E3;atilde;LATIN SMALL LETTER A WITH TILDE 0062;b;LATIN SMALL LETTER B 005C;backslash;REVERSE SOLIDUS 007C;bar;VERTICAL LINE 03B2;beta;GREEK SMALL LETTER BETA 2588;block;FULL BLOCK 007B;braceleft;LEFT CURLY BRACKET 007D;braceright;RIGHT CURLY BRACKET 005B;bracketleft;LEFT SQUARE BRACKET 005D;bracketright;RIGHT SQUARE BRACKET 02D8;breve;BREVE 00A6;brokenbar;BROKEN BAR 2022;bullet;BULLET 0063;c;LATIN SMALL LETTER C 0107;cacute;LATIN SMALL LETTER C WITH ACUTE 02C7;caron;CARON 21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS 010D;ccaron;LATIN SMALL LETTER C WITH CARON 00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA 0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX 010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE 00B8;cedilla;CEDILLA 00A2;cent;CENT SIGN 03C7;chi;GREEK SMALL LETTER CHI 25CB;circle;WHITE CIRCLE 2297;circlemultiply;CIRCLED TIMES 2295;circleplus;CIRCLED PLUS 02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT 2663;club;BLACK CLUB SUIT 003A;colon;COLON 20A1;colonmonetary;COLON SIGN 002C;comma;COMMA 2245;congruent;APPROXIMATELY EQUAL TO 00A9;copyright;COPYRIGHT SIGN 00A4;currency;CURRENCY SIGN 0064;d;LATIN SMALL LETTER D 2020;dagger;DAGGER 2021;daggerdbl;DOUBLE DAGGER 010F;dcaron;LATIN SMALL LETTER D WITH CARON 0111;dcroat;LATIN SMALL LETTER D WITH STROKE 00B0;degree;DEGREE SIGN 03B4;delta;GREEK SMALL LETTER DELTA 2666;diamond;BLACK DIAMOND SUIT 00A8;dieresis;DIAERESIS 0385;dieresistonos;GREEK DIALYTIKA TONOS 00F7;divide;DIVISION SIGN 2593;dkshade;DARK SHADE 2584;dnblock;LOWER HALF BLOCK 0024;dollar;DOLLAR SIGN 20AB;dong;DONG SIGN 02D9;dotaccent;DOT ABOVE 0323;dotbelowcomb;COMBINING DOT BELOW 0131;dotlessi;LATIN SMALL LETTER DOTLESS I 22C5;dotmath;DOT OPERATOR 0065;e;LATIN SMALL LETTER E 00E9;eacute;LATIN SMALL LETTER E WITH ACUTE 0115;ebreve;LATIN SMALL LETTER E WITH BREVE 011B;ecaron;LATIN SMALL LETTER E WITH CARON 00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX 00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS 0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE 00E8;egrave;LATIN SMALL LETTER E WITH GRAVE 0038;eight;DIGIT EIGHT 2208;element;ELEMENT OF 2026;ellipsis;HORIZONTAL ELLIPSIS 0113;emacron;LATIN SMALL LETTER E WITH MACRON 2014;emdash;EM DASH 2205;emptyset;EMPTY SET 2013;endash;EN DASH 014B;eng;LATIN SMALL LETTER ENG 0119;eogonek;LATIN SMALL LETTER E WITH OGONEK 03B5;epsilon;GREEK SMALL LETTER EPSILON 03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS 003D;equal;EQUALS SIGN 2261;equivalence;IDENTICAL TO 212E;estimated;ESTIMATED SYMBOL 03B7;eta;GREEK SMALL LETTER ETA 03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS 00F0;eth;LATIN SMALL LETTER ETH 0021;exclam;EXCLAMATION MARK 203C;exclamdbl;DOUBLE EXCLAMATION MARK 
00A1;exclamdown;INVERTED EXCLAMATION MARK 2203;existential;THERE EXISTS 0066;f;LATIN SMALL LETTER F 2640;female;FEMALE SIGN 2012;figuredash;FIGURE DASH 25A0;filledbox;BLACK SQUARE 25AC;filledrect;BLACK RECTANGLE 0035;five;DIGIT FIVE 215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS 0192;florin;LATIN SMALL LETTER F WITH HOOK 0034;four;DIGIT FOUR 2044;fraction;FRACTION SLASH 20A3;franc;FRENCH FRANC SIGN 0067;g;LATIN SMALL LETTER G 03B3;gamma;GREEK SMALL LETTER GAMMA 011F;gbreve;LATIN SMALL LETTER G WITH BREVE 01E7;gcaron;LATIN SMALL LETTER G WITH CARON 011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX 0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE 00DF;germandbls;LATIN SMALL LETTER SHARP S 2207;gradient;NABLA 0060;grave;GRAVE ACCENT 0300;gravecomb;COMBINING GRAVE ACCENT 003E;greater;GREATER-THAN SIGN 2265;greaterequal;GREATER-THAN OR EQUAL TO 00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK 203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK 0068;h;LATIN SMALL LETTER H 0127;hbar;LATIN SMALL LETTER H WITH STROKE 0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX 2665;heart;BLACK HEART SUIT 0309;hookabovecomb;COMBINING HOOK ABOVE 2302;house;HOUSE 02DD;hungarumlaut;DOUBLE ACUTE ACCENT 002D;hyphen;HYPHEN-MINUS 0069;i;LATIN SMALL LETTER I 00ED;iacute;LATIN SMALL LETTER I WITH ACUTE 012D;ibreve;LATIN SMALL LETTER I WITH BREVE 00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX 00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS 00EC;igrave;LATIN SMALL LETTER I WITH GRAVE 0133;ij;LATIN SMALL LIGATURE IJ 012B;imacron;LATIN SMALL LETTER I WITH MACRON 221E;infinity;INFINITY 222B;integral;INTEGRAL 2321;integralbt;BOTTOM HALF INTEGRAL 2320;integraltp;TOP HALF INTEGRAL 2229;intersection;INTERSECTION 25D8;invbullet;INVERSE BULLET 25D9;invcircle;INVERSE WHITE CIRCLE 263B;invsmileface;BLACK SMILING FACE 012F;iogonek;LATIN SMALL LETTER I WITH OGONEK 03B9;iota;GREEK SMALL LETTER IOTA 03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA 0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS 03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS 0129;itilde;LATIN SMALL LETTER I WITH TILDE 006A;j;LATIN SMALL LETTER J 0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX 006B;k;LATIN SMALL LETTER K 03BA;kappa;GREEK SMALL LETTER KAPPA 0138;kgreenlandic;LATIN SMALL LETTER KRA 006C;l;LATIN SMALL LETTER L 013A;lacute;LATIN SMALL LETTER L WITH ACUTE 03BB;lambda;GREEK SMALL LETTER LAMDA 013E;lcaron;LATIN SMALL LETTER L WITH CARON 0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT 003C;less;LESS-THAN SIGN 2264;lessequal;LESS-THAN OR EQUAL TO 258C;lfblock;LEFT HALF BLOCK 20A4;lira;LIRA SIGN 2227;logicaland;LOGICAL AND 00AC;logicalnot;NOT SIGN 2228;logicalor;LOGICAL OR 017F;longs;LATIN SMALL LETTER LONG S 25CA;lozenge;LOZENGE 0142;lslash;LATIN SMALL LETTER L WITH STROKE 2591;ltshade;LIGHT SHADE 006D;m;LATIN SMALL LETTER M 00AF;macron;MACRON 2642;male;MALE SIGN 2212;minus;MINUS SIGN 2032;minute;PRIME 00B5;mu;MICRO SIGN 00D7;multiply;MULTIPLICATION SIGN 266A;musicalnote;EIGHTH NOTE 266B;musicalnotedbl;BEAMED EIGHTH NOTES 006E;n;LATIN SMALL LETTER N 0144;nacute;LATIN SMALL LETTER N WITH ACUTE 0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE 0148;ncaron;LATIN SMALL LETTER N WITH CARON 0039;nine;DIGIT NINE 2209;notelement;NOT AN ELEMENT OF 2260;notequal;NOT EQUAL TO 2284;notsubset;NOT A SUBSET OF 00F1;ntilde;LATIN SMALL LETTER N WITH TILDE 
03BD;nu;GREEK SMALL LETTER NU 0023;numbersign;NUMBER SIGN 006F;o;LATIN SMALL LETTER O 00F3;oacute;LATIN SMALL LETTER O WITH ACUTE 014F;obreve;LATIN SMALL LETTER O WITH BREVE 00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX 00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS 0153;oe;LATIN SMALL LIGATURE OE 02DB;ogonek;OGONEK 00F2;ograve;LATIN SMALL LETTER O WITH GRAVE 01A1;ohorn;LATIN SMALL LETTER O WITH HORN 0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE 014D;omacron;LATIN SMALL LETTER O WITH MACRON 03C9;omega;GREEK SMALL LETTER OMEGA 03D6;omega1;GREEK PI SYMBOL 03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS 03BF;omicron;GREEK SMALL LETTER OMICRON 03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS 0031;one;DIGIT ONE 2024;onedotenleader;ONE DOT LEADER 215B;oneeighth;VULGAR FRACTION ONE EIGHTH 00BD;onehalf;VULGAR FRACTION ONE HALF 00BC;onequarter;VULGAR FRACTION ONE QUARTER 2153;onethird;VULGAR FRACTION ONE THIRD 25E6;openbullet;WHITE BULLET 00AA;ordfeminine;FEMININE ORDINAL INDICATOR 00BA;ordmasculine;MASCULINE ORDINAL INDICATOR 221F;orthogonal;RIGHT ANGLE 00F8;oslash;LATIN SMALL LETTER O WITH STROKE 01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE 00F5;otilde;LATIN SMALL LETTER O WITH TILDE 0070;p;LATIN SMALL LETTER P 00B6;paragraph;PILCROW SIGN 0028;parenleft;LEFT PARENTHESIS 0029;parenright;RIGHT PARENTHESIS 2202;partialdiff;PARTIAL DIFFERENTIAL 0025;percent;PERCENT SIGN 002E;period;FULL STOP 00B7;periodcentered;MIDDLE DOT 22A5;perpendicular;UP TACK 2030;perthousand;PER MILLE SIGN 20A7;peseta;PESETA SIGN 03C6;phi;GREEK SMALL LETTER PHI 03D5;phi1;GREEK PHI SYMBOL 03C0;pi;GREEK SMALL LETTER PI 002B;plus;PLUS SIGN 00B1;plusminus;PLUS-MINUS SIGN 211E;prescription;PRESCRIPTION TAKE 220F;product;N-ARY PRODUCT 2282;propersubset;SUBSET OF 2283;propersuperset;SUPERSET OF 221D;proportional;PROPORTIONAL TO 03C8;psi;GREEK SMALL LETTER PSI 0071;q;LATIN SMALL LETTER Q 003F;question;QUESTION MARK 00BF;questiondown;INVERTED QUESTION MARK 0022;quotedbl;QUOTATION MARK 201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK 201C;quotedblleft;LEFT DOUBLE QUOTATION MARK 201D;quotedblright;RIGHT DOUBLE QUOTATION MARK 2018;quoteleft;LEFT SINGLE QUOTATION MARK 201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK 2019;quoteright;RIGHT SINGLE QUOTATION MARK 201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK 0027;quotesingle;APOSTROPHE 0072;r;LATIN SMALL LETTER R 0155;racute;LATIN SMALL LETTER R WITH ACUTE 221A;radical;SQUARE ROOT 0159;rcaron;LATIN SMALL LETTER R WITH CARON 2286;reflexsubset;SUBSET OF OR EQUAL TO 2287;reflexsuperset;SUPERSET OF OR EQUAL TO 00AE;registered;REGISTERED SIGN 2310;revlogicalnot;REVERSED NOT SIGN 03C1;rho;GREEK SMALL LETTER RHO 02DA;ring;RING ABOVE 2590;rtblock;RIGHT HALF BLOCK 0073;s;LATIN SMALL LETTER S 015B;sacute;LATIN SMALL LETTER S WITH ACUTE 0161;scaron;LATIN SMALL LETTER S WITH CARON 015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA 015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX 2033;second;DOUBLE PRIME 00A7;section;SECTION SIGN 003B;semicolon;SEMICOLON 0037;seven;DIGIT SEVEN 215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS 2592;shade;MEDIUM SHADE 03C3;sigma;GREEK SMALL LETTER SIGMA 03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA 223C;similar;TILDE OPERATOR 0036;six;DIGIT SIX 002F;slash;SOLIDUS 263A;smileface;WHITE SMILING FACE 0020;space;SPACE 2660;spade;BLACK SPADE SUIT 00A3;sterling;POUND SIGN 220B;suchthat;CONTAINS AS MEMBER 2211;summation;N-ARY SUMMATION 263C;sun;WHITE SUN WITH RAYS 0074;t;LATIN SMALL LETTER T 03C4;tau;GREEK SMALL LETTER TAU 
0167;tbar;LATIN SMALL LETTER T WITH STROKE
0165;tcaron;LATIN SMALL LETTER T WITH CARON
2234;therefore;THEREFORE
03B8;theta;GREEK SMALL LETTER THETA
03D1;theta1;GREEK THETA SYMBOL
00FE;thorn;LATIN SMALL LETTER THORN
0033;three;DIGIT THREE
215C;threeeighths;VULGAR FRACTION THREE EIGHTHS
00BE;threequarters;VULGAR FRACTION THREE QUARTERS
02DC;tilde;SMALL TILDE
0303;tildecomb;COMBINING TILDE
0384;tonos;GREEK TONOS
2122;trademark;TRADE MARK SIGN
25BC;triagdn;BLACK DOWN-POINTING TRIANGLE
25C4;triaglf;BLACK LEFT-POINTING POINTER
25BA;triagrt;BLACK RIGHT-POINTING POINTER
25B2;triagup;BLACK UP-POINTING TRIANGLE
0032;two;DIGIT TWO
2025;twodotenleader;TWO DOT LEADER
2154;twothirds;VULGAR FRACTION TWO THIRDS
0075;u;LATIN SMALL LETTER U
00FA;uacute;LATIN SMALL LETTER U WITH ACUTE
016D;ubreve;LATIN SMALL LETTER U WITH BREVE
00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX
00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS
00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE
01B0;uhorn;LATIN SMALL LETTER U WITH HORN
0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE
016B;umacron;LATIN SMALL LETTER U WITH MACRON
005F;underscore;LOW LINE
2017;underscoredbl;DOUBLE LOW LINE
222A;union;UNION
2200;universal;FOR ALL
0173;uogonek;LATIN SMALL LETTER U WITH OGONEK
2580;upblock;UPPER HALF BLOCK
03C5;upsilon;GREEK SMALL LETTER UPSILON
03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA
03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS
016F;uring;LATIN SMALL LETTER U WITH RING ABOVE
0169;utilde;LATIN SMALL LETTER U WITH TILDE
0076;v;LATIN SMALL LETTER V
0077;w;LATIN SMALL LETTER W
1E83;wacute;LATIN SMALL LETTER W WITH ACUTE
0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX
1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS
2118;weierstrass;SCRIPT CAPITAL P
1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE
0078;x;LATIN SMALL LETTER X
03BE;xi;GREEK SMALL LETTER XI
0079;y;LATIN SMALL LETTER Y
00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE
0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX
00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS
00A5;yen;YEN SIGN
1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE
007A;z;LATIN SMALL LETTER Z
017A;zacute;LATIN SMALL LETTER Z WITH ACUTE
017E;zcaron;LATIN SMALL LETTER Z WITH CARON
017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE
0030;zero;DIGIT ZERO
03B6;zeta;GREEK SMALL LETTER ZETA
# END
"""


class AGLError(Exception):
    pass


LEGACY_AGL2UV = {}
AGL2UV = {}
UV2AGL = {}


def _builddicts():
    import re

    lines = _aglText.splitlines()

    parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")

    for line in lines:
        if not line or line[:1] == "#":
            continue
        m = parseAGL_RE.match(line)
        if not m:
            raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20]))
        unicodes = m.group(2)
        # Each code point is four hex digits; multiple code points are
        # space-separated, so the field length is 4, 9, 14, ... (len % 5 == 4).
        assert len(unicodes) % 5 == 4
        unicodes = [int(unicode, 16) for unicode in unicodes.split()]
        glyphName = tostr(m.group(1))
        LEGACY_AGL2UV[glyphName] = unicodes

    lines = _aglfnText.splitlines()

    parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")

    for line in lines:
        if not line or line[:1] == "#":
            continue
        m = parseAGLFN_RE.match(line)
        if not m:
            raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20]))
        unicode = m.group(1)
        assert len(unicode) == 4
        unicode = int(unicode, 16)
        glyphName = tostr(m.group(2))
        AGL2UV[glyphName] = unicode
        UV2AGL[unicode] = glyphName


_builddicts()


def toUnicode(glyph, isZapfDingbats=False):
    """Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'``

    If ``isZapfDingbats`` is ``True``, the implementation recognizes additional
    glyph names (as required by the AGL specification).
    """
    # https://github.com/adobe-type-tools/agl-specification#2-the-mapping
    #
    # 1. Drop all the characters from the glyph name starting with
    #    the first occurrence of a period (U+002E; FULL STOP), if any.
    glyph = glyph.split(".", 1)[0]

    # 2. Split the remaining string into a sequence of components,
    #    using underscore (U+005F; LOW LINE) as the delimiter.
    components = glyph.split("_")

    # 3. Map each component to a character string according to the
    #    procedure below, and concatenate those strings; the result
    #    is the character string to which the glyph name is mapped.
    result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components]
    return "".join(result)


def _glyphComponentToUnicode(component, isZapfDingbats):
    # If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats),
    # and the component is in the ITC Zapf Dingbats Glyph List, then
    # map it to the corresponding character in that list.
    dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None
    if dingbat:
        return dingbat

    # Otherwise, if the component is in AGL, then map it
    # to the corresponding character in that list.
    uchars = LEGACY_AGL2UV.get(component)
    if uchars:
        return "".join(map(chr, uchars))

    # Otherwise, if the component is of the form "uni" (U+0075,
    # U+006E, and U+0069) followed by a sequence of uppercase
    # hexadecimal digits (0–9 and A–F, meaning U+0030 through
    # U+0039 and U+0041 through U+0046), if the length of that
    # sequence is a multiple of four, and if each group of four
    # digits represents a value in the ranges 0000 through D7FF
    # or E000 through FFFF, then interpret each as a Unicode scalar
    # value and map the component to the string made of those
    # scalar values. Note that the range and digit-length
    # restrictions mean that the "uni" glyph name prefix can be
    # used only with UVs in the Basic Multilingual Plane (BMP).
    uni = _uniToUnicode(component)
    if uni:
        return uni

    # Otherwise, if the component is of the form "u" (U+0075)
    # followed by a sequence of four to six uppercase hexadecimal
    # digits (0–9 and A–F, meaning U+0030 through U+0039 and
    # U+0041 through U+0046), and those digits represent a value
    # in the ranges 0000 through D7FF or E000 through 10FFFF, then
    # interpret it as a Unicode scalar value and map the component
    # to the string made of this scalar value.
    uni = _uToUnicode(component)
    if uni:
        return uni

    # Otherwise, map the component to an empty string.
return "" # https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt _AGL_ZAPF_DINGBATS = ( " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀" "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇" "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔" "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰" ) def _zapfDingbatsToUnicode(glyph): """Helper for toUnicode().""" if len(glyph) < 2 or glyph[0] != "a": return None try: gid = int(glyph[1:]) except ValueError: return None if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS): return None uchar = _AGL_ZAPF_DINGBATS[gid] return uchar if uchar != " " else None _re_uni = re.compile("^uni([0-9A-F]+)$") def _uniToUnicode(component): """Helper for toUnicode() to handle "uniABCD" components.""" match = _re_uni.match(component) if match is None: return None digits = match.group(1) if len(digits) % 4 != 0: return None chars = [int(digits[i : i + 4], 16) for i in range(0, len(digits), 4)] if any(c >= 0xD800 and c <= 0xDFFF for c in chars): # The AGL specification explicitly excluded surrogate pairs. return None return "".join([chr(c) for c in chars]) _re_u = re.compile("^u([0-9A-F]{4,6})$") def _uToUnicode(component): """Helper for toUnicode() to handle "u1ABCD" components.""" match = _re_u.match(component) if match is None: return None digits = match.group(1) try: value = int(digits, 16) except ValueError: return None if (value >= 0x0000 and value <= 0xD7FF) or (value >= 0xE000 and value <= 0x10FFFF): return chr(value) return None PKaZZZ�9lттfontTools/fontBuilder.py__all__ = ["FontBuilder"] """ This module is *experimental*, meaning it still may evolve and change. The `FontBuilder` class is a convenient helper to construct working TTF or OTF fonts from scratch. Note that the various setup methods cannot be called in arbitrary order, due to various interdependencies between OpenType tables. Here is an order that works: fb = FontBuilder(...) fb.setupGlyphOrder(...) fb.setupCharacterMap(...) fb.setupGlyf(...) --or-- fb.setupCFF(...) fb.setupHorizontalMetrics(...) fb.setupHorizontalHeader() fb.setupNameTable(...) fb.setupOS2() fb.addOpenTypeFeatures(...) fb.setupPost() fb.save(...) Here is how to build a minimal TTF: ```python from fontTools.fontBuilder import FontBuilder from fontTools.pens.ttGlyphPen import TTGlyphPen def drawTestGlyph(pen): pen.moveTo((100, 100)) pen.lineTo((100, 1000)) pen.qCurveTo((200, 900), (400, 900), (500, 1000)) pen.lineTo((500, 100)) pen.closePath() fb = FontBuilder(1024, isTTF=True) fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"]) fb.setupCharacterMap({32: "space", 65: "A", 97: "a"}) advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0} familyName = "HelloTestFont" styleName = "TotallyNormal" version = "0.1" nameStrings = dict( familyName=dict(en=familyName, nl="HalloTestFont"), styleName=dict(en=styleName, nl="TotaalNormaal"), uniqueFontIdentifier="fontBuilder: " + familyName + "." 
+ styleName, fullName=familyName + "-" + styleName, psName=familyName + "-" + styleName, version="Version " + version, ) pen = TTGlyphPen(None) drawTestGlyph(pen) glyph = pen.glyph() glyphs = {".notdef": glyph, "space": glyph, "A": glyph, "a": glyph, ".null": glyph} fb.setupGlyf(glyphs) metrics = {} glyphTable = fb.font["glyf"] for gn, advanceWidth in advanceWidths.items(): metrics[gn] = (advanceWidth, glyphTable[gn].xMin) fb.setupHorizontalMetrics(metrics) fb.setupHorizontalHeader(ascent=824, descent=-200) fb.setupNameTable(nameStrings) fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200) fb.setupPost() fb.save("test.ttf") ``` And here's how to build a minimal OTF: ```python from fontTools.fontBuilder import FontBuilder from fontTools.pens.t2CharStringPen import T2CharStringPen def drawTestGlyph(pen): pen.moveTo((100, 100)) pen.lineTo((100, 1000)) pen.curveTo((200, 900), (400, 900), (500, 1000)) pen.lineTo((500, 100)) pen.closePath() fb = FontBuilder(1024, isTTF=False) fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"]) fb.setupCharacterMap({32: "space", 65: "A", 97: "a"}) advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0} familyName = "HelloTestFont" styleName = "TotallyNormal" version = "0.1" nameStrings = dict( familyName=dict(en=familyName, nl="HalloTestFont"), styleName=dict(en=styleName, nl="TotaalNormaal"), uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName, fullName=familyName + "-" + styleName, psName=familyName + "-" + styleName, version="Version " + version, ) pen = T2CharStringPen(600, None) drawTestGlyph(pen) charString = pen.getCharString() charStrings = { ".notdef": charString, "space": charString, "A": charString, "a": charString, ".null": charString, } fb.setupCFF(nameStrings["psName"], {"FullName": nameStrings["psName"]}, charStrings, {}) lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()} metrics = {} for gn, advanceWidth in advanceWidths.items(): metrics[gn] = (advanceWidth, lsb[gn]) fb.setupHorizontalMetrics(metrics) fb.setupHorizontalHeader(ascent=824, descent=200) fb.setupNameTable(nameStrings) fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200) fb.setupPost() fb.save("test.otf") ``` """ from .ttLib import TTFont, newTable from .ttLib.tables._c_m_a_p import cmap_classes from .ttLib.tables._g_l_y_f import flagCubic from .ttLib.tables.O_S_2f_2 import Panose from .misc.timeTools import timestampNow import struct from collections import OrderedDict _headDefaults = dict( tableVersion=1.0, fontRevision=1.0, checkSumAdjustment=0, magicNumber=0x5F0F3CF5, flags=0x0003, unitsPerEm=1000, created=0, modified=0, xMin=0, yMin=0, xMax=0, yMax=0, macStyle=0, lowestRecPPEM=3, fontDirectionHint=2, indexToLocFormat=0, glyphDataFormat=0, ) _maxpDefaultsTTF = dict( tableVersion=0x00010000, numGlyphs=0, maxPoints=0, maxContours=0, maxCompositePoints=0, maxCompositeContours=0, maxZones=2, maxTwilightPoints=0, maxStorage=0, maxFunctionDefs=0, maxInstructionDefs=0, maxStackElements=0, maxSizeOfInstructions=0, maxComponentElements=0, maxComponentDepth=0, ) _maxpDefaultsOTF = dict( tableVersion=0x00005000, numGlyphs=0, ) _postDefaults = dict( formatType=3.0, italicAngle=0, underlinePosition=0, underlineThickness=0, isFixedPitch=0, minMemType42=0, maxMemType42=0, minMemType1=0, maxMemType1=0, ) _hheaDefaults = dict( tableVersion=0x00010000, ascent=0, descent=0, lineGap=0, advanceWidthMax=0, minLeftSideBearing=0, minRightSideBearing=0, xMaxExtent=0, caretSlopeRise=1, caretSlopeRun=0, caretOffset=0, 
reserved0=0, reserved1=0, reserved2=0, reserved3=0, metricDataFormat=0, numberOfHMetrics=0, ) _vheaDefaults = dict( tableVersion=0x00010000, ascent=0, descent=0, lineGap=0, advanceHeightMax=0, minTopSideBearing=0, minBottomSideBearing=0, yMaxExtent=0, caretSlopeRise=0, caretSlopeRun=0, reserved0=0, reserved1=0, reserved2=0, reserved3=0, reserved4=0, metricDataFormat=0, numberOfVMetrics=0, ) _nameIDs = dict( copyright=0, familyName=1, styleName=2, uniqueFontIdentifier=3, fullName=4, version=5, psName=6, trademark=7, manufacturer=8, designer=9, description=10, vendorURL=11, designerURL=12, licenseDescription=13, licenseInfoURL=14, # reserved = 15, typographicFamily=16, typographicSubfamily=17, compatibleFullName=18, sampleText=19, postScriptCIDFindfontName=20, wwsFamilyName=21, wwsSubfamilyName=22, lightBackgroundPalette=23, darkBackgroundPalette=24, variationsPostScriptNamePrefix=25, ) # to insert in setupNameTable doc string: # print("\n".join(("%s (nameID %s)" % (k, v)) for k, v in sorted(_nameIDs.items(), key=lambda x: x[1]))) _panoseDefaults = Panose() _OS2Defaults = dict( version=3, xAvgCharWidth=0, usWeightClass=400, usWidthClass=5, fsType=0x0004, # default: Preview & Print embedding ySubscriptXSize=0, ySubscriptYSize=0, ySubscriptXOffset=0, ySubscriptYOffset=0, ySuperscriptXSize=0, ySuperscriptYSize=0, ySuperscriptXOffset=0, ySuperscriptYOffset=0, yStrikeoutSize=0, yStrikeoutPosition=0, sFamilyClass=0, panose=_panoseDefaults, ulUnicodeRange1=0, ulUnicodeRange2=0, ulUnicodeRange3=0, ulUnicodeRange4=0, achVendID="????", fsSelection=0, usFirstCharIndex=0, usLastCharIndex=0, sTypoAscender=0, sTypoDescender=0, sTypoLineGap=0, usWinAscent=0, usWinDescent=0, ulCodePageRange1=0, ulCodePageRange2=0, sxHeight=0, sCapHeight=0, usDefaultChar=0, # .notdef usBreakChar=32, # space usMaxContext=0, usLowerOpticalPointSize=0, usUpperOpticalPointSize=0, ) class FontBuilder(object): def __init__(self, unitsPerEm=None, font=None, isTTF=True, glyphDataFormat=0): """Initialize a FontBuilder instance. If the `font` argument is not given, a new `TTFont` will be constructed, and `unitsPerEm` must be given. If `isTTF` is True, the font will be a glyf-based TTF; if `isTTF` is False it will be a CFF-based OTF. The `glyphDataFormat` argument corresponds to the `head` table field that defines the format of the TrueType `glyf` table (default=0). TrueType glyphs historically can only contain quadratic splines and static components, but there's a proposal to add support for cubic Bezier curves as well as variable composites/components at https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1.md You can experiment with the new features by setting `glyphDataFormat` to 1. A ValueError is raised if `glyphDataFormat` is left at 0 but glyphs are added that contain cubic splines or varcomposites. This is to prevent accidentally creating fonts that are incompatible with existing TrueType implementations. If `font` is given, it must be a `TTFont` instance and `unitsPerEm` must _not_ be given. The `isTTF` and `glyphDataFormat` arguments will be ignored. """ if font is None: self.font = TTFont(recalcTimestamp=False) self.isTTF = isTTF now = timestampNow() assert unitsPerEm is not None self.setupHead( unitsPerEm=unitsPerEm, created=now, modified=now, glyphDataFormat=glyphDataFormat, ) self.setupMaxp() else: assert unitsPerEm is None self.font = font self.isTTF = "glyf" in font def save(self, file): """Save the font. The 'file' argument can be either a pathname or a writable file object. 
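For example, ``fb.save("Test.ttf")`` writes the font to disk; a writable binary stream such as ``io.BytesIO()`` can be passed instead of a path (the file name here is made up for illustration).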
""" self.font.save(file) def _initTableWithValues(self, tableTag, defaults, values): table = self.font[tableTag] = newTable(tableTag) for k, v in defaults.items(): setattr(table, k, v) for k, v in values.items(): setattr(table, k, v) return table def _updateTableWithValues(self, tableTag, values): table = self.font[tableTag] for k, v in values.items(): setattr(table, k, v) def setupHead(self, **values): """Create a new `head` table and initialize it with default values, which can be overridden by keyword arguments. """ self._initTableWithValues("head", _headDefaults, values) def updateHead(self, **values): """Update the head table with the fields and values passed as keyword arguments. """ self._updateTableWithValues("head", values) def setupGlyphOrder(self, glyphOrder): """Set the glyph order for the font.""" self.font.setGlyphOrder(glyphOrder) def setupCharacterMap(self, cmapping, uvs=None, allowFallback=False): """Build the `cmap` table for the font. The `cmapping` argument should be a dict mapping unicode code points as integers to glyph names. The `uvs` argument, when passed, must be a list of tuples, describing Unicode Variation Sequences. These tuples have three elements: (unicodeValue, variationSelector, glyphName) `unicodeValue` and `variationSelector` are integer code points. `glyphName` may be None, to indicate this is the default variation. Text processors will then use the cmap to find the glyph name. Each Unicode Variation Sequence should be an officially supported sequence, but this is not policed. """ subTables = [] highestUnicode = max(cmapping) if cmapping else 0 if highestUnicode > 0xFFFF: cmapping_3_1 = dict((k, v) for k, v in cmapping.items() if k < 0x10000) subTable_3_10 = buildCmapSubTable(cmapping, 12, 3, 10) subTables.append(subTable_3_10) else: cmapping_3_1 = cmapping format = 4 subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1) try: subTable_3_1.compile(self.font) except struct.error: # format 4 overflowed, fall back to format 12 if not allowFallback: raise ValueError( "cmap format 4 subtable overflowed; sort glyph order by unicode to fix." ) format = 12 subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1) subTables.append(subTable_3_1) subTable_0_3 = buildCmapSubTable(cmapping_3_1, format, 0, 3) subTables.append(subTable_0_3) if uvs is not None: uvsDict = {} for unicodeValue, variationSelector, glyphName in uvs: if cmapping.get(unicodeValue) == glyphName: # this is a default variation glyphName = None if variationSelector not in uvsDict: uvsDict[variationSelector] = [] uvsDict[variationSelector].append((unicodeValue, glyphName)) uvsSubTable = buildCmapSubTable({}, 14, 0, 5) uvsSubTable.uvsDict = uvsDict subTables.append(uvsSubTable) self.font["cmap"] = newTable("cmap") self.font["cmap"].tableVersion = 0 self.font["cmap"].tables = subTables def setupNameTable(self, nameStrings, windows=True, mac=True): """Create the `name` table for the font. The `nameStrings` argument must be a dict, mapping nameIDs or descriptive names for the nameIDs to name record values. A value is either a string, or a dict, mapping language codes to strings, to allow localized name table entries. By default, both Windows (platformID=3) and Macintosh (platformID=1) name records are added, unless any of `windows` or `mac` arguments is False. 
The following descriptive names are available for nameIDs: copyright (nameID 0) familyName (nameID 1) styleName (nameID 2) uniqueFontIdentifier (nameID 3) fullName (nameID 4) version (nameID 5) psName (nameID 6) trademark (nameID 7) manufacturer (nameID 8) designer (nameID 9) description (nameID 10) vendorURL (nameID 11) designerURL (nameID 12) licenseDescription (nameID 13) licenseInfoURL (nameID 14) typographicFamily (nameID 16) typographicSubfamily (nameID 17) compatibleFullName (nameID 18) sampleText (nameID 19) postScriptCIDFindfontName (nameID 20) wwsFamilyName (nameID 21) wwsSubfamilyName (nameID 22) lightBackgroundPalette (nameID 23) darkBackgroundPalette (nameID 24) variationsPostScriptNamePrefix (nameID 25) """ nameTable = self.font["name"] = newTable("name") nameTable.names = [] for nameName, nameValue in nameStrings.items(): if isinstance(nameName, int): nameID = nameName else: nameID = _nameIDs[nameName] if isinstance(nameValue, str): nameValue = dict(en=nameValue) nameTable.addMultilingualName( nameValue, ttFont=self.font, nameID=nameID, windows=windows, mac=mac ) def setupOS2(self, **values): """Create a new `OS/2` table and initialize it with default values, which can be overridden by keyword arguments. """ self._initTableWithValues("OS/2", _OS2Defaults, values) if "xAvgCharWidth" not in values: assert ( "hmtx" in self.font ), "the 'hmtx' table must be set up before the 'OS/2' table" self.font["OS/2"].recalcAvgCharWidth(self.font) if not ( "ulUnicodeRange1" in values or "ulUnicodeRange2" in values or "ulUnicodeRange3" in values or "ulUnicodeRange4" in values ): assert ( "cmap" in self.font ), "the 'cmap' table must be set up before the 'OS/2' table" self.font["OS/2"].recalcUnicodeRanges(self.font) def setupCFF(self, psName, fontInfo, charStringsDict, privateDict): from .cffLib import ( CFFFontSet, TopDictIndex, TopDict, CharStrings, GlobalSubrsIndex, PrivateDict, ) assert not self.isTTF self.font.sfntVersion = "OTTO" fontSet = CFFFontSet() fontSet.major = 1 fontSet.minor = 0 fontSet.otFont = self.font fontSet.fontNames = [psName] fontSet.topDictIndex = TopDictIndex() globalSubrs = GlobalSubrsIndex() fontSet.GlobalSubrs = globalSubrs private = PrivateDict() for key, value in privateDict.items(): setattr(private, key, value) fdSelect = None fdArray = None topDict = TopDict() topDict.charset = self.font.getGlyphOrder() topDict.Private = private topDict.GlobalSubrs = fontSet.GlobalSubrs for key, value in fontInfo.items(): setattr(topDict, key, value) if "FontMatrix" not in fontInfo: scale = 1 / self.font["head"].unitsPerEm topDict.FontMatrix = [scale, 0, 0, scale, 0, 0] charStrings = CharStrings( None, topDict.charset, globalSubrs, private, fdSelect, fdArray ) for glyphName, charString in charStringsDict.items(): charString.private = private charString.globalSubrs = globalSubrs charStrings[glyphName] = charString topDict.CharStrings = charStrings fontSet.topDictIndex.append(topDict) self.font["CFF "] = newTable("CFF ") self.font["CFF "].cff = fontSet def setupCFF2(self, charStringsDict, fdArrayList=None, regions=None): from .cffLib import ( CFFFontSet, TopDictIndex, TopDict, CharStrings, GlobalSubrsIndex, PrivateDict, FDArrayIndex, FontDict, ) assert not self.isTTF self.font.sfntVersion = "OTTO" fontSet = CFFFontSet() fontSet.major = 2 fontSet.minor = 0 cff2GetGlyphOrder = self.font.getGlyphOrder fontSet.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None) globalSubrs = GlobalSubrsIndex() fontSet.GlobalSubrs = globalSubrs if fdArrayList is None: fdArrayList = [{}] fdSelect
= None fdArray = FDArrayIndex() fdArray.strings = None fdArray.GlobalSubrs = globalSubrs for privateDict in fdArrayList: fontDict = FontDict() fontDict.setCFF2(True) private = PrivateDict() for key, value in privateDict.items(): setattr(private, key, value) fontDict.Private = private fdArray.append(fontDict) topDict = TopDict() topDict.cff2GetGlyphOrder = cff2GetGlyphOrder topDict.FDArray = fdArray scale = 1 / self.font["head"].unitsPerEm topDict.FontMatrix = [scale, 0, 0, scale, 0, 0] private = fdArray[0].Private charStrings = CharStrings(None, None, globalSubrs, private, fdSelect, fdArray) for glyphName, charString in charStringsDict.items(): charString.private = private charString.globalSubrs = globalSubrs charStrings[glyphName] = charString topDict.CharStrings = charStrings fontSet.topDictIndex.append(topDict) self.font["CFF2"] = newTable("CFF2") self.font["CFF2"].cff = fontSet if regions: self.setupCFF2Regions(regions) def setupCFF2Regions(self, regions): from .varLib.builder import buildVarRegionList, buildVarData, buildVarStore from .cffLib import VarStoreData assert "fvar" in self.font, "fvar must be set up first" assert "CFF2" in self.font, "CFF2 must be set up first" axisTags = [a.axisTag for a in self.font["fvar"].axes] varRegionList = buildVarRegionList(regions, axisTags) varData = buildVarData(list(range(len(regions))), None, optimize=False) varStore = buildVarStore(varRegionList, [varData]) vstore = VarStoreData(otVarStore=varStore) topDict = self.font["CFF2"].cff.topDictIndex[0] topDict.VarStore = vstore for fontDict in topDict.FDArray: fontDict.Private.vstore = vstore def setupGlyf(self, glyphs, calcGlyphBounds=True, validateGlyphFormat=True): """Create the `glyf` table from a dict that maps glyph names to `fontTools.ttLib.tables._g_l_y_f.Glyph` objects, for example as made by `fontTools.pens.ttGlyphPen.TTGlyphPen`. If `calcGlyphBounds` is True, the bounds of all glyphs will be calculated. Only pass False if your glyph objects already have their bounding box values set. If `validateGlyphFormat` is True, raise ValueError if any of the glyphs contains cubic curves or is a variable composite but head.glyphDataFormat=0. Set it to False to skip the check if you know in advance all the glyphs are compatible with the specified glyphDataFormat. """ assert self.isTTF if validateGlyphFormat and self.font["head"].glyphDataFormat == 0: for name, g in glyphs.items(): if g.isVarComposite(): raise ValueError( f"Glyph {name!r} is a variable composite, but glyphDataFormat=0" ) elif g.numberOfContours > 0 and any(f & flagCubic for f in g.flags): raise ValueError( f"Glyph {name!r} has cubic Bezier outlines, but glyphDataFormat=0; " "either convert to quadratics with cu2qu or set glyphDataFormat=1." ) self.font["loca"] = newTable("loca") self.font["glyf"] = newTable("glyf") self.font["glyf"].glyphs = glyphs if hasattr(self.font, "glyphOrder"): self.font["glyf"].glyphOrder = self.font.glyphOrder if calcGlyphBounds: self.calcGlyphBounds() def setupFvar(self, axes, instances): """Adds a font variations table to the font. Args: axes (list): See below. instances (list): See below. ``axes`` should be a list of axes, with each axis either supplied as a py:class:`.designspaceLib.AxisDescriptor` object, or a tuple in the format ``(tag, minValue, defaultValue, maxValue, name)``. The ``name`` is either a string, or a dict, mapping language codes to strings, to allow localized name table entries.
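For example, a single weight axis could be supplied as the tuple ``("wght", 100, 400, 900, "Weight")``, i.e. ``(tag, minValue, defaultValue, maxValue, name)``; the tag and values here are illustrative.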
``instances`` should be a list of instances, with each instance either supplied as a py:class:`.designspaceLib.InstanceDescriptor` object, or a dict with keys ``location`` (mapping of axis tags to float values), ``stylename`` and (optionally) ``postscriptfontname``. The ``stylename`` is either a string, or a dict, mapping language codes to strings, to allow localized name table entries. """ addFvar(self.font, axes, instances) def setupAvar(self, axes, mappings=None): """Adds an axis variations table to the font. Args: axes (list): A list of py:class:`.designspaceLib.AxisDescriptor` objects. """ from .varLib import _add_avar if "fvar" not in self.font: raise KeyError("'fvar' table is missing; can't add 'avar'.") axisTags = [axis.axisTag for axis in self.font["fvar"].axes] axes = OrderedDict(enumerate(axes)) # Only values are used _add_avar(self.font, axes, mappings, axisTags) def setupGvar(self, variations): gvar = self.font["gvar"] = newTable("gvar") gvar.version = 1 gvar.reserved = 0 gvar.variations = variations def calcGlyphBounds(self): """Calculate the bounding boxes of all glyphs in the `glyf` table. This is usually not called explicitly by client code. """ glyphTable = self.font["glyf"] for glyph in glyphTable.glyphs.values(): glyph.recalcBounds(glyphTable) def setupHorizontalMetrics(self, metrics): """Create a new `hmtx` table, for horizontal metrics. The `metrics` argument must be a dict, mapping glyph names to `(width, leftSidebearing)` tuples. """ self.setupMetrics("hmtx", metrics) def setupVerticalMetrics(self, metrics): """Create a new `vmtx` table, for vertical metrics. The `metrics` argument must be a dict, mapping glyph names to `(height, topSidebearing)` tuples. """ self.setupMetrics("vmtx", metrics) def setupMetrics(self, tableTag, metrics): """See `setupHorizontalMetrics()` and `setupVerticalMetrics()`.""" assert tableTag in ("hmtx", "vmtx") mtxTable = self.font[tableTag] = newTable(tableTag) roundedMetrics = {} for gn in metrics: w, lsb = metrics[gn] roundedMetrics[gn] = int(round(w)), int(round(lsb)) mtxTable.metrics = roundedMetrics def setupHorizontalHeader(self, **values): """Create a new `hhea` table and initialize it with default values, which can be overridden by keyword arguments. """ self._initTableWithValues("hhea", _hheaDefaults, values) def setupVerticalHeader(self, **values): """Create a new `vhea` table and initialize it with default values, which can be overridden by keyword arguments. """ self._initTableWithValues("vhea", _vheaDefaults, values) def setupVerticalOrigins(self, verticalOrigins, defaultVerticalOrigin=None): """Create a new `VORG` table. The `verticalOrigins` argument must be a dict, mapping glyph names to vertical origin values. The `defaultVerticalOrigin` argument should be the most common vertical origin value. If omitted, this value will be derived from the actual values in the `verticalOrigins` argument.
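For example (glyph names and origin values made up for illustration): ``fb.setupVerticalOrigins({"A": 880, "B": 880, "x": 870}, defaultVerticalOrigin=880)``.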
""" if defaultVerticalOrigin is None: # find the most frequent vorg value bag = {} for gn in verticalOrigins: vorg = verticalOrigins[gn] if vorg not in bag: bag[vorg] = 1 else: bag[vorg] += 1 defaultVerticalOrigin = sorted( bag, key=lambda vorg: bag[vorg], reverse=True )[0] self._initTableWithValues( "VORG", {}, dict(VOriginRecords={}, defaultVertOriginY=defaultVerticalOrigin), ) vorgTable = self.font["VORG"] vorgTable.majorVersion = 1 vorgTable.minorVersion = 0 for gn in verticalOrigins: vorgTable[gn] = verticalOrigins[gn] def setupPost(self, keepGlyphNames=True, **values): """Create a new `post` table and initialize it with default values, which can be overridden by keyword arguments. """ isCFF2 = "CFF2" in self.font postTable = self._initTableWithValues("post", _postDefaults, values) if (self.isTTF or isCFF2) and keepGlyphNames: postTable.formatType = 2.0 postTable.extraNames = [] postTable.mapping = {} else: postTable.formatType = 3.0 def setupMaxp(self): """Create a new `maxp` table. This is called implicitly by FontBuilder itself and is usually not called by client code. """ if self.isTTF: defaults = _maxpDefaultsTTF else: defaults = _maxpDefaultsOTF self._initTableWithValues("maxp", defaults, {}) def setupDummyDSIG(self): """This adds an empty DSIG table to the font to make some MS applications happy. This does not properly sign the font. """ values = dict( ulVersion=1, usFlag=0, usNumSigs=0, signatureRecords=[], ) self._initTableWithValues("DSIG", {}, values) def addOpenTypeFeatures(self, features, filename=None, tables=None, debug=False): """Add OpenType features to the font from a string containing Feature File syntax. The `filename` argument is used in error messages and to determine where to look for "include" files. The optional `tables` argument can be a list of OTL tables tags to build, allowing the caller to only build selected OTL tables. See `fontTools.feaLib` for details. The optional `debug` argument controls whether to add source debugging information to the font in the `Debg` table. """ from .feaLib.builder import addOpenTypeFeaturesFromString addOpenTypeFeaturesFromString( self.font, features, filename=filename, tables=tables, debug=debug ) def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"): """Add conditional substitutions to a Variable Font. See `fontTools.varLib.featureVars.addFeatureVariations`. """ from .varLib import featureVars if "fvar" not in self.font: raise KeyError("'fvar' table is missing; can't add FeatureVariations.") featureVars.addFeatureVariations( self.font, conditionalSubstitutions, featureTag=featureTag ) def setupCOLR( self, colorLayers, version=None, varStore=None, varIndexMap=None, clipBoxes=None, allowLayerReuse=True, ): """Build new COLR table using color layers dictionary. Cf. `fontTools.colorLib.builder.buildCOLR`. """ from fontTools.colorLib.builder import buildCOLR glyphMap = self.font.getReverseGlyphMap() self.font["COLR"] = buildCOLR( colorLayers, version=version, glyphMap=glyphMap, varStore=varStore, varIndexMap=varIndexMap, clipBoxes=clipBoxes, allowLayerReuse=allowLayerReuse, ) def setupCPAL( self, palettes, paletteTypes=None, paletteLabels=None, paletteEntryLabels=None, ): """Build new CPAL table using list of palettes. Optionally build CPAL v1 table using paletteTypes, paletteLabels and paletteEntryLabels. Cf. `fontTools.colorLib.builder.buildCPAL`. 
""" from fontTools.colorLib.builder import buildCPAL self.font["CPAL"] = buildCPAL( palettes, paletteTypes=paletteTypes, paletteLabels=paletteLabels, paletteEntryLabels=paletteEntryLabels, nameTable=self.font.get("name"), ) def setupStat(self, axes, locations=None, elidedFallbackName=2): """Build a new 'STAT' table. See `fontTools.otlLib.builder.buildStatTable` for details about the arguments. """ from .otlLib.builder import buildStatTable buildStatTable(self.font, axes, locations, elidedFallbackName) def buildCmapSubTable(cmapping, format, platformID, platEncID): subTable = cmap_classes[format](format) subTable.cmap = cmapping subTable.platformID = platformID subTable.platEncID = platEncID subTable.language = 0 return subTable def addFvar(font, axes, instances): from .ttLib.tables._f_v_a_r import Axis, NamedInstance assert axes fvar = newTable("fvar") nameTable = font["name"] for axis_def in axes: axis = Axis() if isinstance(axis_def, tuple): ( axis.axisTag, axis.minValue, axis.defaultValue, axis.maxValue, name, ) = axis_def else: (axis.axisTag, axis.minValue, axis.defaultValue, axis.maxValue, name) = ( axis_def.tag, axis_def.minimum, axis_def.default, axis_def.maximum, axis_def.name, ) if axis_def.hidden: axis.flags = 0x0001 # HIDDEN_AXIS if isinstance(name, str): name = dict(en=name) axis.axisNameID = nameTable.addMultilingualName(name, ttFont=font) fvar.axes.append(axis) for instance in instances: if isinstance(instance, dict): coordinates = instance["location"] name = instance["stylename"] psname = instance.get("postscriptfontname") else: coordinates = instance.location name = instance.localisedStyleName or instance.styleName psname = instance.postScriptFontName if isinstance(name, str): name = dict(en=name) inst = NamedInstance() inst.subfamilyNameID = nameTable.addMultilingualName(name, ttFont=font) if psname is not None: inst.postscriptNameID = nameTable.addName(psname) inst.coordinates = coordinates fvar.instances.append(inst) font["fvar"] = fvar PKaZZZ+��ZfontTools/help.pyimport pkgutil import sys import fontTools import importlib import os from pathlib import Path def main(): """Show this help""" path = fontTools.__path__ descriptions = {} for pkg in sorted( mod.name for mod in pkgutil.walk_packages([fontTools.__path__[0]], prefix="fontTools.") ): try: imports = __import__(pkg, globals(), locals(), ["main"]) except ImportError as e: continue try: description = imports.main.__doc__ if description: pkg = pkg.replace("fontTools.", "").replace(".__main__", "") # show the docstring's first line only descriptions[pkg] = description.splitlines()[0] except AttributeError as e: pass for pkg, description in descriptions.items(): print("fonttools %-25s %s" % (pkg, description), file=sys.stderr) if __name__ == "__main__": print("fonttools v%s\n" % fontTools.__version__, file=sys.stderr) main() PKaZZZ�C��7�7fontTools/tfmLib.py"""Module for reading TFM (TeX Font Metrics) files. The TFM format is described in the TFtoPL WEB source code, whose typeset form can be found on `CTAN <http://mirrors.ctan.org/info/knuth-pdf/texware/tftopl.pdf>`_. >>> from fontTools.tfmLib import TFM >>> tfm = TFM("Tests/tfmLib/data/cmr10.tfm") >>> >>> # Accessing an attribute gets you metadata. 
>>> tfm.checksum 1274110073 >>> tfm.designsize 10.0 >>> tfm.codingscheme 'TeX text' >>> tfm.family 'CMR' >>> tfm.seven_bit_safe_flag False >>> tfm.face 234 >>> tfm.extraheader {} >>> tfm.fontdimens {'SLANT': 0.0, 'SPACE': 0.33333396911621094, 'STRETCH': 0.16666698455810547, 'SHRINK': 0.11111164093017578, 'XHEIGHT': 0.4305553436279297, 'QUAD': 1.0000028610229492, 'EXTRASPACE': 0.11111164093017578} >>> # Accessing a character gets you its metrics. >>> # “width” is always available, other metrics are available only when >>> # applicable. All values are relative to “designsize”. >>> tfm.chars[ord("g")] {'width': 0.5000019073486328, 'height': 0.4305553436279297, 'depth': 0.1944446563720703, 'italic': 0.013888359069824219} >>> # Kerning and ligature can be accessed as well. >>> tfm.kerning[ord("c")] {104: -0.02777862548828125, 107: -0.02777862548828125} >>> tfm.ligatures[ord("f")] {105: ('LIG', 12), 102: ('LIG', 11), 108: ('LIG', 13)} """ from types import SimpleNamespace from fontTools.misc.sstruct import calcsize, unpack, unpack2 SIZES_FORMAT = """ > lf: h # length of the entire file, in words lh: h # length of the header data, in words bc: h # smallest character code in the font ec: h # largest character code in the font nw: h # number of words in the width table nh: h # number of words in the height table nd: h # number of words in the depth table ni: h # number of words in the italic correction table nl: h # number of words in the ligature/kern table nk: h # number of words in the kern table ne: h # number of words in the extensible character table np: h # number of font parameter words """ SIZES_SIZE = calcsize(SIZES_FORMAT) FIXED_FORMAT = "12.20F" HEADER_FORMAT1 = f""" > checksum: L designsize: {FIXED_FORMAT} """ HEADER_FORMAT2 = f""" {HEADER_FORMAT1} codingscheme: 40p """ HEADER_FORMAT3 = f""" {HEADER_FORMAT2} family: 20p """ HEADER_FORMAT4 = f""" {HEADER_FORMAT3} seven_bit_safe_flag: ? ignored: x ignored: x face: B """ HEADER_SIZE1 = calcsize(HEADER_FORMAT1) HEADER_SIZE2 = calcsize(HEADER_FORMAT2) HEADER_SIZE3 = calcsize(HEADER_FORMAT3) HEADER_SIZE4 = calcsize(HEADER_FORMAT4) LIG_KERN_COMMAND = """ > skip_byte: B next_char: B op_byte: B remainder: B """ BASE_PARAMS = [ "SLANT", "SPACE", "STRETCH", "SHRINK", "XHEIGHT", "QUAD", "EXTRASPACE", ] MATHSY_PARAMS = [ "NUM1", "NUM2", "NUM3", "DENOM1", "DENOM2", "SUP1", "SUP2", "SUP3", "SUB1", "SUB2", "SUPDROP", "SUBDROP", "DELIM1", "DELIM2", "AXISHEIGHT", ] MATHEX_PARAMS = [ "DEFAULTRULETHICKNESS", "BIGOPSPACING1", "BIGOPSPACING2", "BIGOPSPACING3", "BIGOPSPACING4", "BIGOPSPACING5", ] VANILLA = 0 MATHSY = 1 MATHEX = 2 UNREACHABLE = 0 PASSTHROUGH = 1 ACCESSABLE = 2 NO_TAG = 0 LIG_TAG = 1 LIST_TAG = 2 EXT_TAG = 3 STOP_FLAG = 128 KERN_FLAG = 128 class TFMException(Exception): def __init__(self, message): super().__init__(message) class TFM: def __init__(self, file): self._read(file) def __repr__(self): return ( f"<TFM" f" for {self.family}" f" in {self.codingscheme}" f" at {self.designsize:g}pt>" ) def _read(self, file): if hasattr(file, "read"): data = file.read() else: with open(file, "rb") as fp: data = fp.read() self._data = data if len(data) < SIZES_SIZE: raise TFMException("Too short input file") sizes = SimpleNamespace() unpack2(SIZES_FORMAT, data, sizes) # Do some file structure sanity checks. # TeX and TFtoPL do additional functional checks and might even correct # “errors” in the input file, but we instead try to output the file as # it is as long as it is parsable, even if the data make no sense. 
if sizes.lf <= 0: raise TFMException("The file claims to have negative or zero length!") if len(data) < sizes.lf * 4: raise TFMException("The file has fewer bytes than it claims!") for name, length in vars(sizes).items(): if length < 0: raise TFMException(f"The subfile size '{name}' is negative!") if sizes.lh < 2: raise TFMException(f"The header length is only {sizes.lh}!") if sizes.bc > sizes.ec + 1 or sizes.ec > 255: raise TFMException( f"The character code range {sizes.bc}..{sizes.ec} is illegal!" ) if sizes.nw == 0 or sizes.nh == 0 or sizes.nd == 0 or sizes.ni == 0: raise TFMException("Incomplete subfiles for character dimensions!") if sizes.ne > 256: raise TFMException(f"There are {sizes.ne} extensible recipes!") if sizes.lf != ( 6 + sizes.lh + (sizes.ec - sizes.bc + 1) + sizes.nw + sizes.nh + sizes.nd + sizes.ni + sizes.nl + sizes.nk + sizes.ne + sizes.np ): raise TFMException("Subfile sizes don’t add up to the stated total") # Subfile offsets, used in the helper function below. These are all # 32-bit word offsets, not 8-bit byte offsets. char_base = 6 + sizes.lh - sizes.bc width_base = char_base + sizes.ec + 1 height_base = width_base + sizes.nw depth_base = height_base + sizes.nh italic_base = depth_base + sizes.nd lig_kern_base = italic_base + sizes.ni kern_base = lig_kern_base + sizes.nl exten_base = kern_base + sizes.nk param_base = exten_base + sizes.ne # Helper functions for accessing individual data. If this looks # nonidiomatic Python, I blame the effect of reading the literate WEB # documentation of TFtoPL. def char_info(c): return 4 * (char_base + c) def width_index(c): return data[char_info(c)] def noneexistent(c): return c < sizes.bc or c > sizes.ec or width_index(c) == 0 def height_index(c): return data[char_info(c) + 1] // 16 def depth_index(c): return data[char_info(c) + 1] % 16 def italic_index(c): return data[char_info(c) + 2] // 4 def tag(c): return data[char_info(c) + 2] % 4 def remainder(c): return data[char_info(c) + 3] def width(c): r = 4 * (width_base + width_index(c)) return read_fixed(r, "v")["v"] def height(c): r = 4 * (height_base + height_index(c)) return read_fixed(r, "v")["v"] def depth(c): r = 4 * (depth_base + depth_index(c)) return read_fixed(r, "v")["v"] def italic(c): r = 4 * (italic_base + italic_index(c)) return read_fixed(r, "v")["v"] def exten(c): return 4 * (exten_base + remainder(c)) def lig_step(i): return 4 * (lig_kern_base + i) def lig_kern_command(i): command = SimpleNamespace() unpack2(LIG_KERN_COMMAND, data[i:], command) return command def kern(i): r = 4 * (kern_base + i) return read_fixed(r, "v")["v"] def param(i): return 4 * (param_base + i) def read_fixed(index, key, obj=None): ret = unpack2(f">;{key}:{FIXED_FORMAT}", data[index:], obj) return ret[0] # Set all attributes to empty values regardless of the header size.
unpack(HEADER_FORMAT4, [0] * HEADER_SIZE4, self) offset = 24 length = sizes.lh * 4 self.extraheader = {} if length >= HEADER_SIZE4: rest = unpack2(HEADER_FORMAT4, data[offset:], self)[1] if self.face < 18: s = self.face % 2 b = self.face // 2 self.face = "MBL"[b % 3] + "RI"[s] + "RCE"[b // 3] for i in range(sizes.lh - HEADER_SIZE4 // 4): rest = unpack2(f">;HEADER{i + 18}:l", rest, self.extraheader)[1] elif length >= HEADER_SIZE3: unpack2(HEADER_FORMAT3, data[offset:], self) elif length >= HEADER_SIZE2: unpack2(HEADER_FORMAT2, data[offset:], self) elif length >= HEADER_SIZE1: unpack2(HEADER_FORMAT1, data[offset:], self) self.fonttype = VANILLA scheme = self.codingscheme.upper() if scheme.startswith("TEX MATH SY"): self.fonttype = MATHSY elif scheme.startswith("TEX MATH EX"): self.fonttype = MATHEX self.fontdimens = {} for i in range(sizes.np): name = f"PARAMETER{i+1}" if i <= 6: name = BASE_PARAMS[i] elif self.fonttype == MATHSY and i <= 21: name = MATHSY_PARAMS[i - 7] elif self.fonttype == MATHEX and i <= 12: name = MATHEX_PARAMS[i - 7] read_fixed(param(i), name, self.fontdimens) lig_kern_map = {} self.right_boundary_char = None self.left_boundary_char = None if sizes.nl > 0: cmd = lig_kern_command(lig_step(0)) if cmd.skip_byte == 255: self.right_boundary_char = cmd.next_char cmd = lig_kern_command(lig_step((sizes.nl - 1))) if cmd.skip_byte == 255: self.left_boundary_char = 256 r = 256 * cmd.op_byte + cmd.remainder lig_kern_map[self.left_boundary_char] = r self.chars = {} for c in range(sizes.bc, sizes.ec + 1): if width_index(c) > 0: self.chars[c] = info = {} info["width"] = width(c) if height_index(c) > 0: info["height"] = height(c) if depth_index(c) > 0: info["depth"] = depth(c) if italic_index(c) > 0: info["italic"] = italic(c) char_tag = tag(c) if char_tag == NO_TAG: pass elif char_tag == LIG_TAG: lig_kern_map[c] = remainder(c) elif char_tag == LIST_TAG: info["nextlarger"] = remainder(c) elif char_tag == EXT_TAG: info["varchar"] = varchar = {} for i in range(4): part = data[exten(c) + i] if i == 3 or part > 0: name = "rep" if i == 0: name = "top" elif i == 1: name = "mid" elif i == 2: name = "bot" if noneexistent(part): varchar[name] = c else: varchar[name] = part self.ligatures = {} self.kerning = {} for c, i in sorted(lig_kern_map.items()): cmd = lig_kern_command(lig_step(i)) if cmd.skip_byte > STOP_FLAG: i = 256 * cmd.op_byte + cmd.remainder while i < sizes.nl: cmd = lig_kern_command(lig_step(i)) if cmd.skip_byte > STOP_FLAG: pass else: if cmd.op_byte >= KERN_FLAG: r = 256 * (cmd.op_byte - KERN_FLAG) + cmd.remainder self.kerning.setdefault(c, {})[cmd.next_char] = kern(r) else: r = cmd.op_byte if r == 4 or (r > 7 and r != 11): # Ligature step with nonstandard code, we output # the code verbatim. 
lig = r else: lig = "" if r % 4 > 1: lig += "/" lig += "LIG" if r % 2 != 0: lig += "/" while r > 3: lig += ">" r -= 4 self.ligatures.setdefault(c, {})[cmd.next_char] = ( lig, cmd.remainder, ) if cmd.skip_byte >= STOP_FLAG: break i += cmd.skip_byte + 1 if __name__ == "__main__": import sys tfm = TFM(sys.argv[1]) print( "\n".join( x for x in [ f"tfm.checksum={tfm.checksum}", f"tfm.designsize={tfm.designsize}", f"tfm.codingscheme={tfm.codingscheme}", f"tfm.fonttype={tfm.fonttype}", f"tfm.family={tfm.family}", f"tfm.seven_bit_safe_flag={tfm.seven_bit_safe_flag}", f"tfm.face={tfm.face}", f"tfm.extraheader={tfm.extraheader}", f"tfm.fontdimens={tfm.fontdimens}", f"tfm.right_boundary_char={tfm.right_boundary_char}", f"tfm.left_boundary_char={tfm.left_boundary_char}", f"tfm.kerning={tfm.kerning}", f"tfm.ligatures={tfm.ligatures}", f"tfm.chars={tfm.chars}", ] ) ) print(tfm) PKaZZZ ���AAfontTools/ttx.py"""\ usage: ttx [options] inputfile1 [... inputfileN] TTX -- From OpenType To XML And Back If an input file is a TrueType or OpenType font file, it will be decompiled to a TTX file (an XML-based text format). If an input file is a TTX file, it will be compiled to whatever format the data is in, a TrueType or OpenType/CFF font file. A special input value of - means read from the standard input. Output files are created so they are unique: an existing file is never overwritten. General options =============== -h Help print this message. --version show version and exit. -d <outputfolder> Specify a directory where the output files are to be created. -o <outputfile> Specify a file to write the output to. A special value of - would use the standard output. -f Overwrite existing output file(s), ie. don't append numbers. -v Verbose: more messages will be written to stdout about what is being done. -q Quiet: No messages will be written to stdout about what is being done. -a allow virtual glyphs ID's on compile or decompile. Dump options ============ -l List table info: instead of dumping to a TTX file, list some minimal info about each table. -t <table> Specify a table to dump. Multiple -t options are allowed. When no -t option is specified, all tables will be dumped. -x <table> Specify a table to exclude from the dump. Multiple -x options are allowed. -t and -x are mutually exclusive. -s Split tables: save the TTX data into separate TTX files per table and write one small TTX file that contains references to the individual table dumps. This file can be used as input to ttx, as long as the table files are in the same directory. -g Split glyf table: Save the glyf data into separate TTX files per glyph and write a small TTX for the glyf table which contains references to the individual TTGlyph elements. NOTE: specifying -g implies -s (no need for -s together with -g) -i Do NOT disassemble TT instructions: when this option is given, all TrueType programs (glyph programs, the font program and the pre-program) will be written to the TTX file as hex data instead of assembly. This saves some time and makes the TTX file smaller. -z <format> Specify a bitmap data export option for EBDT: {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT: {'raw', 'extfile'} Each option does one of the following: -z raw export the bitmap data as a hex dump -z row export each row as hex data -z bitwise export each row as binary in an ASCII art style -z extfile export the data as external files with XML references If no export format is specified 'raw' format is used. 
-e Don't ignore decompilation errors, but show a full traceback and abort. -y <number> Select font number for TrueType Collection (.ttc/.otc), starting from 0. --unicodedata <UnicodeData.txt> Use custom database file to write character names in the comments of the cmap TTX output. --newline <value> Control how line endings are written in the XML file. It can be 'LF', 'CR', or 'CRLF'. If not specified, the default platform-specific line endings are used. Compile options =============== -m Merge with TrueType-input-file: specify a TrueType or OpenType font file to be merged with the TTX file. This option is only valid when at most one TTX file is specified. -b Don't recalc glyph bounding boxes: use the values in the TTX file as-is. --recalc-timestamp Set font 'modified' timestamp to current time. By default, the modification time of the TTX file will be used. --no-recalc-timestamp Keep the original font 'modified' timestamp. --flavor <type> Specify flavor of output font file. May be 'woff' or 'woff2'. Note that WOFF2 requires the Brotli Python extension, available at https://github.com/google/brotli --with-zopfli Use Zopfli instead of Zlib to compress WOFF. The Python extension is available at https://pypi.python.org/pypi/zopfli """ from fontTools.ttLib import TTFont, TTLibError from fontTools.misc.macCreatorType import getMacCreatorAndType from fontTools.unicode import setUnicodeData from fontTools.misc.textTools import Tag, tostr from fontTools.misc.timeTools import timestampSinceEpoch from fontTools.misc.loggingTools import Timer from fontTools.misc.cliTools import makeOutputFileName import os import sys import getopt import re import logging log = logging.getLogger("fontTools.ttx") opentypeheaderRE = re.compile("""sfntVersion=['"]OTTO["']""") class Options(object): listTables = False outputDir = None outputFile = None overWrite = False verbose = False quiet = False splitTables = False splitGlyphs = False disassembleInstructions = True mergeFile = None recalcBBoxes = True ignoreDecompileErrors = True bitmapGlyphDataFormat = "raw" unicodedata = None newlinestr = "\n" recalcTimestamp = None flavor = None useZopfli = False def __init__(self, rawOptions, numFiles): self.onlyTables = [] self.skipTables = [] self.fontNumber = -1 for option, value in rawOptions: # general options if option == "-h": print(__doc__) sys.exit(0) elif option == "--version": from fontTools import version print(version) sys.exit(0) elif option == "-d": if not os.path.isdir(value): raise getopt.GetoptError( "The -d option value must be an existing directory" ) self.outputDir = value elif option == "-o": self.outputFile = value elif option == "-f": self.overWrite = True elif option == "-v": self.verbose = True elif option == "-q": self.quiet = True # dump options elif option == "-l": self.listTables = True elif option == "-t": # pad with space if table tag length is less than 4 value = value.ljust(4) self.onlyTables.append(value) elif option == "-x": # pad with space if table tag length is less than 4 value = value.ljust(4) self.skipTables.append(value) elif option == "-s": self.splitTables = True elif option == "-g": # -g implies (and forces) splitTables self.splitGlyphs = True self.splitTables = True elif option == "-i": self.disassembleInstructions = False elif option == "-z": validOptions = ("raw", "row", "bitwise", "extfile") if value not in validOptions: raise getopt.GetoptError( "-z does not allow %s as a format. 
Use %s" % (option, validOptions) ) self.bitmapGlyphDataFormat = value elif option == "-y": self.fontNumber = int(value) # compile options elif option == "-m": self.mergeFile = value elif option == "-b": self.recalcBBoxes = False elif option == "-e": self.ignoreDecompileErrors = False elif option == "--unicodedata": self.unicodedata = value elif option == "--newline": validOptions = ("LF", "CR", "CRLF") if value == "LF": self.newlinestr = "\n" elif value == "CR": self.newlinestr = "\r" elif value == "CRLF": self.newlinestr = "\r\n" else: raise getopt.GetoptError( "Invalid choice for --newline: %r (choose from %s)" % (value, ", ".join(map(repr, validOptions))) ) elif option == "--recalc-timestamp": self.recalcTimestamp = True elif option == "--no-recalc-timestamp": self.recalcTimestamp = False elif option == "--flavor": self.flavor = value elif option == "--with-zopfli": self.useZopfli = True if self.verbose and self.quiet: raise getopt.GetoptError("-q and -v options are mutually exclusive") if self.verbose: self.logLevel = logging.DEBUG elif self.quiet: self.logLevel = logging.WARNING else: self.logLevel = logging.INFO if self.mergeFile and self.flavor: raise getopt.GetoptError("-m and --flavor options are mutually exclusive") if self.onlyTables and self.skipTables: raise getopt.GetoptError("-t and -x options are mutually exclusive") if self.mergeFile and numFiles > 1: raise getopt.GetoptError( "Must specify exactly one TTX source file when using -m" ) if self.flavor != "woff" and self.useZopfli: raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'") def ttList(input, output, options): ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True) reader = ttf.reader tags = sorted(reader.keys()) print('Listing table info for "%s":' % input) format = " %4s %10s %8s %8s" print(format % ("tag ", " checksum", " length", " offset")) print(format % ("----", "----------", "--------", "--------")) for tag in tags: entry = reader.tables[tag] if ttf.flavor == "woff2": # WOFF2 doesn't store table checksums, so they must be calculated from fontTools.ttLib.sfnt import calcChecksum data = entry.loadData(reader.transformBuffer) checkSum = calcChecksum(data) else: checkSum = int(entry.checkSum) if checkSum < 0: checkSum = checkSum + 0x100000000 checksum = "0x%08X" % checkSum print(format % (tag, checksum, entry.length, entry.offset)) print() ttf.close() @Timer(log, "Done dumping TTX in %(time).3f seconds") def ttDump(input, output, options): input_name = input if input == "-": input, input_name = sys.stdin.buffer, sys.stdin.name output_name = output if output == "-": output, output_name = sys.stdout, sys.stdout.name log.info('Dumping "%s" to "%s"...', input_name, output_name) if options.unicodedata: setUnicodeData(options.unicodedata) ttf = TTFont( input, 0, ignoreDecompileErrors=options.ignoreDecompileErrors, fontNumber=options.fontNumber, ) ttf.saveXML( output, tables=options.onlyTables, skipTables=options.skipTables, splitTables=options.splitTables, splitGlyphs=options.splitGlyphs, disassembleInstructions=options.disassembleInstructions, bitmapGlyphDataFormat=options.bitmapGlyphDataFormat, newlinestr=options.newlinestr, ) ttf.close() @Timer(log, "Done compiling TTX in %(time).3f seconds") def ttCompile(input, output, options): input_name = input if input == "-": input, input_name = sys.stdin, sys.stdin.name output_name = output if output == "-": output, output_name = sys.stdout.buffer, sys.stdout.name log.info('Compiling "%s" to "%s"...' 
% (input_name, output_name)) if options.useZopfli: from fontTools.ttLib import sfnt sfnt.USE_ZOPFLI = True ttf = TTFont( options.mergeFile, flavor=options.flavor, recalcBBoxes=options.recalcBBoxes, recalcTimestamp=options.recalcTimestamp, ) ttf.importXML(input) if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin: # use TTX file modification time for head "modified" timestamp mtime = os.path.getmtime(input) ttf["head"].modified = timestampSinceEpoch(mtime) ttf.save(output) def guessFileType(fileName): if fileName == "-": header = sys.stdin.buffer.peek(256) ext = "" else: base, ext = os.path.splitext(fileName) try: with open(fileName, "rb") as f: header = f.read(256) except IOError: return None if header.startswith(b"\xef\xbb\xbf<?xml"): header = header.lstrip(b"\xef\xbb\xbf") cr, tp = getMacCreatorAndType(fileName) if tp in ("sfnt", "FFIL"): return "TTF" if ext == ".dfont": return "TTF" head = Tag(header[:4]) if head == "OTTO": return "OTF" elif head == "ttcf": return "TTC" elif head in ("\0\1\0\0", "true"): return "TTF" elif head == "wOFF": return "WOFF" elif head == "wOF2": return "WOFF2" elif head == "<?xm": # Use 'latin1' because that can't fail. header = tostr(header, "latin1") if opentypeheaderRE.search(header): return "OTX" else: return "TTX" return None def parseOptions(args): rawOptions, files = getopt.getopt( args, "ld:o:fvqht:x:sgim:z:baey:", [ "unicodedata=", "recalc-timestamp", "no-recalc-timestamp", "flavor=", "version", "with-zopfli", "newline=", ], ) options = Options(rawOptions, len(files)) jobs = [] if not files: raise getopt.GetoptError("Must specify at least one input file") for input in files: if input != "-" and not os.path.isfile(input): raise getopt.GetoptError('File not found: "%s"' % input) tp = guessFileType(input) if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"): extension = ".ttx" if options.listTables: action = ttList else: action = ttDump elif tp == "TTX": extension = "." + options.flavor if options.flavor else ".ttf" action = ttCompile elif tp == "OTX": extension = "." + options.flavor if options.flavor else ".otf" action = ttCompile else: raise getopt.GetoptError('Unknown file type: "%s"' % input) if options.outputFile: output = options.outputFile else: if input == "-": raise getopt.GetoptError("Must provide -o when reading from stdin") output = makeOutputFileName( input, options.outputDir, extension, options.overWrite ) # 'touch' output file to avoid race condition in choosing file names if action != ttList: open(output, "a").close() jobs.append((action, input, output)) return jobs, options def process(jobs, options): for action, input, output in jobs: action(input, output, options) def main(args=None): """Convert OpenType fonts to XML and back""" from fontTools import configLogger if args is None: args = sys.argv[1:] try: jobs, options = parseOptions(args) except getopt.GetoptError as e: print("%s\nERROR: %s" % (__doc__, e), file=sys.stderr) sys.exit(2) configLogger(level=options.logLevel) try: process(jobs, options) except KeyboardInterrupt: log.error("(Cancelled.)") sys.exit(1) except SystemExit: raise except TTLibError as e: log.error(e) sys.exit(1) except: log.exception("Unhandled exception has occurred") sys.exit(1) if __name__ == "__main__": sys.exit(main()) PKaZZZ~[��fontTools/unicode.pydef _makeunicodes(f): lines = iter(f.readlines()) unicodes = {} for line in lines: if not line: continue num, name = line.split(";")[:2] if name[0] == "<": continue # "<control>", etc.
num = int(num, 16) unicodes[num] = name return unicodes class _UnicodeCustom(object): def __init__(self, f): if isinstance(f, str): with open(f) as fd: codes = _makeunicodes(fd) else: codes = _makeunicodes(f) self.codes = codes def __getitem__(self, charCode): try: return self.codes[charCode] except KeyError: return "????" class _UnicodeBuiltin(object): def __getitem__(self, charCode): try: # use unicodedata backport to python2, if available: # https://github.com/mikekap/unicodedata2 import unicodedata2 as unicodedata except ImportError: import unicodedata try: return unicodedata.name(chr(charCode)) except ValueError: return "????" Unicode = _UnicodeBuiltin() def setUnicodeData(f): global Unicode Unicode = _UnicodeCustom(f) PKaZZZ�l$�����fontTools/cffLib/__init__.py"""cffLib: read/write Adobe CFF fonts OpenType fonts with PostScript outlines contain a completely independent font file, Adobe's *Compact Font Format*. So dealing with OpenType fonts requires also dealing with CFF. This module allows you to read and write fonts written in the CFF format. In 2016, OpenType 1.8 introduced the `CFF2 <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2>`_ format which, along with other changes, extended the CFF format to deal with the demands of variable fonts. This module parses both original CFF and CFF2. """ from fontTools.misc import sstruct from fontTools.misc import psCharStrings from fontTools.misc.arrayTools import unionRect, intRect from fontTools.misc.textTools import ( bytechr, byteord, bytesjoin, tobytes, tostr, safeEval, ) from fontTools.ttLib import TTFont from fontTools.ttLib.tables.otBase import OTTableWriter from fontTools.ttLib.tables.otBase import OTTableReader from fontTools.ttLib.tables import otTables as ot from io import BytesIO import struct import logging import re # mute cffLib debug messages when running ttx in verbose mode DEBUG = logging.DEBUG - 1 log = logging.getLogger(__name__) cffHeaderFormat = """ major: B minor: B hdrSize: B """ maxStackLimit = 513 # maxstack operator has been deprecated. max stack is now always 513. class StopHintCountEvent(Exception): pass class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): stop_hintcount_ops = ( "op_hintmask", "op_cntrmask", "op_rmoveto", "op_hmoveto", "op_vmoveto", ) def __init__(self, localSubrs, globalSubrs, private=None): psCharStrings.SimpleT2Decompiler.__init__( self, localSubrs, globalSubrs, private ) def execute(self, charString): self.need_hintcount = True # until proven otherwise for op_name in self.stop_hintcount_ops: setattr(self, op_name, self.stop_hint_count) if hasattr(charString, "_desubroutinized"): # If a charstring has already been desubroutinized, we will still # need to execute it if we need to count hints in order to # compute the byte length for mask arguments, and haven't finished # counting hints pairs. 
if self.need_hintcount and self.callingStack: try: psCharStrings.SimpleT2Decompiler.execute(self, charString) except StopHintCountEvent: del self.callingStack[-1] return charString._patches = [] psCharStrings.SimpleT2Decompiler.execute(self, charString) desubroutinized = charString.program[:] for idx, expansion in reversed(charString._patches): assert idx >= 2 assert desubroutinized[idx - 1] in [ "callsubr", "callgsubr", ], desubroutinized[idx - 1] assert type(desubroutinized[idx - 2]) == int if expansion[-1] == "return": expansion = expansion[:-1] desubroutinized[idx - 2 : idx] = expansion if not self.private.in_cff2: if "endchar" in desubroutinized: # Cut off after first endchar desubroutinized = desubroutinized[ : desubroutinized.index("endchar") + 1 ] else: if not len(desubroutinized) or desubroutinized[-1] != "return": desubroutinized.append("return") charString._desubroutinized = desubroutinized del charString._patches def op_callsubr(self, index): subr = self.localSubrs[self.operandStack[-1] + self.localBias] psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) self.processSubr(index, subr) def op_callgsubr(self, index): subr = self.globalSubrs[self.operandStack[-1] + self.globalBias] psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) self.processSubr(index, subr) def stop_hint_count(self, *args): self.need_hintcount = False for op_name in self.stop_hintcount_ops: setattr(self, op_name, None) cs = self.callingStack[-1] if hasattr(cs, "_desubroutinized"): raise StopHintCountEvent() def op_hintmask(self, index): psCharStrings.SimpleT2Decompiler.op_hintmask(self, index) if self.need_hintcount: self.stop_hint_count() def processSubr(self, index, subr): cs = self.callingStack[-1] if not hasattr(cs, "_desubroutinized"): cs._patches.append((index, subr._desubroutinized)) class CFFFontSet(object): """A CFF font "file" can contain more than one font, although this is extremely rare (and not allowed within OpenType fonts). This class is the entry point for parsing a CFF table. To actually manipulate the data inside the CFF font, you will want to access the ``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet`` object can either be treated as a dictionary (with appropriate ``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict` objects, or as a list. .. code:: python from fontTools import ttLib tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf") tt["CFF "].cff # <fontTools.cffLib.CFFFontSet object at 0x101e24c90> tt["CFF "].cff[0] # Here's your actual font data # <fontTools.cffLib.TopDict object at 0x1020f1fd0> """ def decompile(self, file, otFont, isCFF2=None): """Parse a binary CFF file into an internal representation. ``file`` should be a file handle object. ``otFont`` is the top-level :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file. If ``isCFF2`` is passed and set to ``True`` or ``False``, then the library makes an assertion that the CFF header is of the appropriate version. 
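A minimal sketch of driving this method directly (normally ``TTFont`` does this for you when it reads the ``CFF `` table; ``rawData`` is a stand-in for the table's bytes):

.. code:: python

    from io import BytesIO
    cff = CFFFontSet()
    cff.decompile(BytesIO(rawData), otFont=None)  # plain CFF; CFF2 needs a real TTFont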
""" self.otFont = otFont sstruct.unpack(cffHeaderFormat, file.read(3), self) if isCFF2 is not None: # called from ttLib: assert 'major' as read from file matches the # expected version expected_major = 2 if isCFF2 else 1 if self.major != expected_major: raise ValueError( "Invalid CFF 'major' version: expected %d, found %d" % (expected_major, self.major) ) else: # use 'major' version from file to determine if isCFF2 assert self.major in (1, 2), "Unknown CFF format" isCFF2 = self.major == 2 if not isCFF2: self.offSize = struct.unpack("B", file.read(1))[0] file.seek(self.hdrSize) self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2)) self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2) self.strings = IndexedStrings(file) else: # isCFF2 self.topDictSize = struct.unpack(">H", file.read(2))[0] file.seek(self.hdrSize) self.fontNames = ["CFF2Font"] cff2GetGlyphOrder = otFont.getGlyphOrder # in CFF2, offsetSize is the size of the TopDict data. self.topDictIndex = TopDictIndex( file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2 ) self.strings = None self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2) self.topDictIndex.strings = self.strings self.topDictIndex.GlobalSubrs = self.GlobalSubrs def __len__(self): return len(self.fontNames) def keys(self): return list(self.fontNames) def values(self): return self.topDictIndex def __getitem__(self, nameOrIndex): """Return TopDict instance identified by name (str) or index (int or any object that implements `__index__`). """ if hasattr(nameOrIndex, "__index__"): index = nameOrIndex.__index__() elif isinstance(nameOrIndex, str): name = nameOrIndex try: index = self.fontNames.index(name) except ValueError: raise KeyError(nameOrIndex) else: raise TypeError(nameOrIndex) return self.topDictIndex[index] def compile(self, file, otFont, isCFF2=None): """Write the object back into binary representation onto the given file. ``file`` should be a file handle object. ``otFont`` is the top-level :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file. If ``isCFF2`` is passed and set to ``True`` or ``False``, then the library makes an assertion that the CFF header is of the appropriate version. """ self.otFont = otFont if isCFF2 is not None: # called from ttLib: assert 'major' value matches expected version expected_major = 2 if isCFF2 else 1 if self.major != expected_major: raise ValueError( "Invalid CFF 'major' version: expected %d, found %d" % (expected_major, self.major) ) else: # use current 'major' value to determine output format assert self.major in (1, 2), "Unknown CFF format" isCFF2 = self.major == 2 if otFont.recalcBBoxes and not isCFF2: for topDict in self.topDictIndex: topDict.recalcFontBBox() if not isCFF2: strings = IndexedStrings() else: strings = None writer = CFFWriter(isCFF2) topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2) if isCFF2: self.hdrSize = 5 writer.add(sstruct.pack(cffHeaderFormat, self)) # Note: topDictSize will most likely change in CFFWriter.toFile(). self.topDictSize = topCompiler.getDataLength() writer.add(struct.pack(">H", self.topDictSize)) else: self.hdrSize = 4 self.offSize = 4 # will most likely change in CFFWriter.toFile(). 
writer.add(sstruct.pack(cffHeaderFormat, self)) writer.add(struct.pack("B", self.offSize)) if not isCFF2: fontNames = Index() for name in self.fontNames: fontNames.append(name) writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2)) writer.add(topCompiler) if not isCFF2: writer.add(strings.getCompiler()) writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2)) for topDict in self.topDictIndex: if not hasattr(topDict, "charset") or topDict.charset is None: charset = otFont.getGlyphOrder() topDict.charset = charset children = topCompiler.getChildren(strings) for child in children: writer.add(child) writer.toFile(file) def toXML(self, xmlWriter): """Write the object into XML representation onto the given :class:`fontTools.misc.xmlWriter.XMLWriter`. .. code:: python writer = xmlWriter.XMLWriter(sys.stdout) tt["CFF "].cff.toXML(writer) """ xmlWriter.simpletag("major", value=self.major) xmlWriter.newline() xmlWriter.simpletag("minor", value=self.minor) xmlWriter.newline() for fontName in self.fontNames: xmlWriter.begintag("CFFFont", name=tostr(fontName)) xmlWriter.newline() font = self[fontName] font.toXML(xmlWriter) xmlWriter.endtag("CFFFont") xmlWriter.newline() xmlWriter.newline() xmlWriter.begintag("GlobalSubrs") xmlWriter.newline() self.GlobalSubrs.toXML(xmlWriter) xmlWriter.endtag("GlobalSubrs") xmlWriter.newline() def fromXML(self, name, attrs, content, otFont=None): """Reads data from the XML element into the ``CFFFontSet`` object.""" self.otFont = otFont # set defaults. These will be replaced if there are entries for them # in the XML file. if not hasattr(self, "major"): self.major = 1 if not hasattr(self, "minor"): self.minor = 0 if name == "CFFFont": if self.major == 1: if not hasattr(self, "offSize"): # this will be recalculated when the cff is compiled. self.offSize = 4 if not hasattr(self, "hdrSize"): self.hdrSize = 4 if not hasattr(self, "GlobalSubrs"): self.GlobalSubrs = GlobalSubrsIndex() if not hasattr(self, "fontNames"): self.fontNames = [] self.topDictIndex = TopDictIndex() fontName = attrs["name"] self.fontNames.append(fontName) topDict = TopDict(GlobalSubrs=self.GlobalSubrs) topDict.charset = None # gets filled in later elif self.major == 2: if not hasattr(self, "hdrSize"): self.hdrSize = 5 if not hasattr(self, "GlobalSubrs"): self.GlobalSubrs = GlobalSubrsIndex() if not hasattr(self, "fontNames"): self.fontNames = ["CFF2Font"] cff2GetGlyphOrder = self.otFont.getGlyphOrder topDict = TopDict( GlobalSubrs=self.GlobalSubrs, cff2GetGlyphOrder=cff2GetGlyphOrder ) self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder) self.topDictIndex.append(topDict) for element in content: if isinstance(element, str): continue name, attrs, content = element topDict.fromXML(name, attrs, content) if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None: fdArray = topDict.FDArray for fontDict in fdArray: if hasattr(fontDict, "Private"): fontDict.Private.vstore = topDict.VarStore elif name == "GlobalSubrs": subrCharStringClass = psCharStrings.T2CharString if not hasattr(self, "GlobalSubrs"): self.GlobalSubrs = GlobalSubrsIndex() for element in content: if isinstance(element, str): continue name, attrs, content = element subr = subrCharStringClass() subr.fromXML(name, attrs, content) self.GlobalSubrs.append(subr) elif name == "major": self.major = int(attrs["value"]) elif name == "minor": self.minor = int(attrs["value"]) def convertCFFToCFF2(self, otFont): """Converts this object from CFF format to CFF2 format. This conversion is done 'in-place'. 
The conversion cannot be reversed. This assumes a decompiled CFF table. (i.e. that the object has been filled via :meth:`decompile`.)""" self.major = 2 cff2GetGlyphOrder = self.otFont.getGlyphOrder topDictData = TopDictIndex(None, cff2GetGlyphOrder) topDictData.items = self.topDictIndex.items self.topDictIndex = topDictData topDict = topDictData[0] if hasattr(topDict, "Private"): privateDict = topDict.Private else: privateDict = None opOrder = buildOrder(topDictOperators2) topDict.order = opOrder topDict.cff2GetGlyphOrder = cff2GetGlyphOrder for entry in topDictOperators: key = entry[1] if key not in opOrder: if key in topDict.rawDict: del topDict.rawDict[key] if hasattr(topDict, key): delattr(topDict, key) if not hasattr(topDict, "FDArray"): fdArray = topDict.FDArray = FDArrayIndex() fdArray.strings = None fdArray.GlobalSubrs = topDict.GlobalSubrs topDict.GlobalSubrs.fdArray = fdArray charStrings = topDict.CharStrings if charStrings.charStringsAreIndexed: charStrings.charStringsIndex.fdArray = fdArray else: charStrings.fdArray = fdArray fontDict = FontDict() fontDict.setCFF2(True) fdArray.append(fontDict) fontDict.Private = privateDict privateOpOrder = buildOrder(privateDictOperators2) for entry in privateDictOperators: key = entry[1] if key not in privateOpOrder: if key in privateDict.rawDict: # print "Removing private dict", key del privateDict.rawDict[key] if hasattr(privateDict, key): delattr(privateDict, key) # print "Removing privateDict attr", key else: # clean up the PrivateDicts in the fdArray fdArray = topDict.FDArray privateOpOrder = buildOrder(privateDictOperators2) for fontDict in fdArray: fontDict.setCFF2(True) for key in fontDict.rawDict.keys(): if key not in fontDict.order: del fontDict.rawDict[key] if hasattr(fontDict, key): delattr(fontDict, key) privateDict = fontDict.Private for entry in privateDictOperators: key = entry[1] if key not in privateOpOrder: if key in privateDict.rawDict: # print "Removing private dict", key del privateDict.rawDict[key] if hasattr(privateDict, key): delattr(privateDict, key) # print "Removing privateDict attr", key # At this point, the Subrs and Charstrings are all still T2Charstring class # easiest to fix this by compiling, then decompiling again file = BytesIO() self.compile(file, otFont, isCFF2=True) file.seek(0) self.decompile(file, otFont, isCFF2=True) def desubroutinize(self): for fontName in self.fontNames: font = self[fontName] cs = font.CharStrings for g in font.charset: c, _ = cs.getItemAndSelector(g) c.decompile() subrs = getattr(c.private, "Subrs", []) decompiler = _DesubroutinizingT2Decompiler( subrs, c.globalSubrs, c.private ) decompiler.execute(c) c.program = c._desubroutinized del c._desubroutinized # Delete all the local subrs if hasattr(font, "FDArray"): for fd in font.FDArray: pd = fd.Private if hasattr(pd, "Subrs"): del pd.Subrs if "Subrs" in pd.rawDict: del pd.rawDict["Subrs"] else: pd = font.Private if hasattr(pd, "Subrs"): del pd.Subrs if "Subrs" in pd.rawDict: del pd.rawDict["Subrs"] # as well as the global subrs self.GlobalSubrs.clear() class CFFWriter(object): """Helper class for serializing CFF data to binary. 
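Because the byte width of CFF offsets depends on the very data being laid out, ``toFile`` below recomputes item positions in a loop (feeding them back via ``setPos``) until the offset list stops changing.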
Used by :meth:`CFFFontSet.compile`.""" def __init__(self, isCFF2): self.data = [] self.isCFF2 = isCFF2 def add(self, table): self.data.append(table) def toFile(self, file): lastPosList = None count = 1 while True: log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count) count = count + 1 pos = 0 posList = [pos] for item in self.data: if hasattr(item, "getDataLength"): endPos = pos + item.getDataLength() if isinstance(item, TopDictIndexCompiler) and item.isCFF2: self.topDictSize = item.getDataLength() else: endPos = pos + len(item) if hasattr(item, "setPos"): item.setPos(pos, endPos) pos = endPos posList.append(pos) if posList == lastPosList: break lastPosList = posList log.log(DEBUG, "CFFWriter.toFile() writing to file.") begin = file.tell() if self.isCFF2: self.data[1] = struct.pack(">H", self.topDictSize) else: self.offSize = calcOffSize(lastPosList[-1]) self.data[1] = struct.pack("B", self.offSize) posList = [0] for item in self.data: if hasattr(item, "toFile"): item.toFile(file) else: file.write(item) posList.append(file.tell() - begin) assert posList == lastPosList def calcOffSize(largestOffset): if largestOffset < 0x100: offSize = 1 elif largestOffset < 0x10000: offSize = 2 elif largestOffset < 0x1000000: offSize = 3 else: offSize = 4 return offSize class IndexCompiler(object): """Base class for writing CFF `INDEX data <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#5-index-data>`_ to binary.""" def __init__(self, items, strings, parent, isCFF2=None): if isCFF2 is None and hasattr(parent, "isCFF2"): isCFF2 = parent.isCFF2 assert isCFF2 is not None self.isCFF2 = isCFF2 self.items = self.getItems(items, strings) self.parent = parent def getItems(self, items, strings): return items def getOffsets(self): # An empty INDEX contains only the count field. if self.items: pos = 1 offsets = [pos] for item in self.items: if hasattr(item, "getDataLength"): pos = pos + item.getDataLength() else: pos = pos + len(item) offsets.append(pos) else: offsets = [] return offsets def getDataLength(self): if self.isCFF2: countSize = 4 else: countSize = 2 if self.items: lastOffset = self.getOffsets()[-1] offSize = calcOffSize(lastOffset) dataLength = ( countSize + 1 # count + (len(self.items) + 1) * offSize # offSize + lastOffset # the offsets - 1 # size of object data ) else: # count. For empty INDEX tables, this is the only entry. dataLength = countSize return dataLength def toFile(self, file): offsets = self.getOffsets() if self.isCFF2: writeCard32(file, len(self.items)) else: writeCard16(file, len(self.items)) # An empty INDEX contains only the count field. 
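# A non-empty INDEX is laid out as: count, offSize, then (count + 1)
# one-based offsets of offSize bytes each, then the concatenated object data.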
if self.items: offSize = calcOffSize(offsets[-1]) writeCard8(file, offSize) offSize = -offSize pack = struct.pack for offset in offsets: binOffset = pack(">l", offset)[offSize:] assert len(binOffset) == -offSize file.write(binOffset) for item in self.items: if hasattr(item, "toFile"): item.toFile(file) else: data = tobytes(item, encoding="latin1") file.write(data) class IndexedStringsCompiler(IndexCompiler): def getItems(self, items, strings): return items.strings class TopDictIndexCompiler(IndexCompiler): """Helper class for writing the TopDict to binary.""" def getItems(self, items, strings): out = [] for item in items: out.append(item.getCompiler(strings, self)) return out def getChildren(self, strings): children = [] for topDict in self.items: children.extend(topDict.getChildren(strings)) return children def getOffsets(self): if self.isCFF2: offsets = [0, self.items[0].getDataLength()] return offsets else: return super(TopDictIndexCompiler, self).getOffsets() def getDataLength(self): if self.isCFF2: dataLength = self.items[0].getDataLength() return dataLength else: return super(TopDictIndexCompiler, self).getDataLength() def toFile(self, file): if self.isCFF2: self.items[0].toFile(file) else: super(TopDictIndexCompiler, self).toFile(file) class FDArrayIndexCompiler(IndexCompiler): """Helper class for writing the `Font DICT INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#10-font-dict-index-font-dicts-and-fdselect>`_ to binary.""" def getItems(self, items, strings): out = [] for item in items: out.append(item.getCompiler(strings, self)) return out def getChildren(self, strings): children = [] for fontDict in self.items: children.extend(fontDict.getChildren(strings)) return children def toFile(self, file): offsets = self.getOffsets() if self.isCFF2: writeCard32(file, len(self.items)) else: writeCard16(file, len(self.items)) offSize = calcOffSize(offsets[-1]) writeCard8(file, offSize) offSize = -offSize pack = struct.pack for offset in offsets: binOffset = pack(">l", offset)[offSize:] assert len(binOffset) == -offSize file.write(binOffset) for item in self.items: if hasattr(item, "toFile"): item.toFile(file) else: file.write(item) def setPos(self, pos, endPos): self.parent.rawDict["FDArray"] = pos class GlobalSubrsCompiler(IndexCompiler): """Helper class for writing the `global subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_ to binary.""" def getItems(self, items, strings): out = [] for cs in items: cs.compile(self.isCFF2) out.append(cs.bytecode) return out class SubrsCompiler(GlobalSubrsCompiler): """Helper class for writing the `local subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_ to binary.""" def setPos(self, pos, endPos): offset = pos - self.parent.pos self.parent.rawDict["Subrs"] = offset class CharStringsCompiler(GlobalSubrsCompiler): """Helper class for writing the `CharStrings INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_ to binary.""" def getItems(self, items, strings): out = [] for cs in items: cs.compile(self.isCFF2) out.append(cs.bytecode) return out def setPos(self, pos, endPos): self.parent.rawDict["CharStrings"] = pos class Index(object): """This class represents what the CFF spec calls an INDEX (an array of variable-sized objects). 
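Items are loaded lazily: the constructor only records the offsets, and each object is read from the file and parsed on first access. For example, assuming ``font`` is a parsed :class:`CFFFontSet`:

.. code:: python

    subrs = font.GlobalSubrs  # GlobalSubrsIndex, an Index subclass
    len(subrs)                # item count from the INDEX header
    subrs[0]                  # first item, fetched and parsed on demand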
`Index` items can be addressed and set using Python list indexing.""" compilerClass = IndexCompiler def __init__(self, file=None, isCFF2=None): assert (isCFF2 is None) == (file is None) self.items = [] name = self.__class__.__name__ if file is None: return self._isCFF2 = isCFF2 log.log(DEBUG, "loading %s at %s", name, file.tell()) self.file = file if isCFF2: count = readCard32(file) else: count = readCard16(file) if count == 0: return self.items = [None] * count offSize = readCard8(file) log.log(DEBUG, " index count: %s offSize: %s", count, offSize) assert offSize <= 4, "offSize too large: %s" % offSize self.offsets = offsets = [] pad = b"\0" * (4 - offSize) for index in range(count + 1): chunk = file.read(offSize) chunk = pad + chunk (offset,) = struct.unpack(">L", chunk) offsets.append(int(offset)) self.offsetBase = file.tell() - 1 file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot log.log(DEBUG, " end of %s at %s", name, file.tell()) def __len__(self): return len(self.items) def __getitem__(self, index): item = self.items[index] if item is not None: return item offset = self.offsets[index] + self.offsetBase size = self.offsets[index + 1] - self.offsets[index] file = self.file file.seek(offset) data = file.read(size) assert len(data) == size item = self.produceItem(index, data, file, offset) self.items[index] = item return item def __setitem__(self, index, item): self.items[index] = item def produceItem(self, index, data, file, offset): return data def append(self, item): """Add an item to an INDEX.""" self.items.append(item) def getCompiler(self, strings, parent, isCFF2=None): return self.compilerClass(self, strings, parent, isCFF2=isCFF2) def clear(self): """Empty the INDEX.""" del self.items[:] class GlobalSubrsIndex(Index): """This index contains all the global subroutines in the font. A global subroutine is a set of ``CharString`` data which is accessible to any glyph in the font, and are used to store repeated instructions - for example, components may be encoded as global subroutines, but so could hinting instructions. Remember that when interpreting a ``callgsubr`` instruction (or indeed a ``callsubr`` instruction) that you will need to add the "subroutine number bias" to number given: .. code:: python tt = ttLib.TTFont("Almendra-Bold.otf") u = tt["CFF "].cff[0].CharStrings["udieresis"] u.decompile() u.toXML(XMLWriter(sys.stdout)) # <some stuff> # -64 callgsubr <-- Subroutine which implements the dieresis mark # <other stuff> tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG # <T2CharString (bytecode) at 103451d10> tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT # <T2CharString (source) at 103451390> ("The bias applied depends on the number of subrs (gsubrs). If the number of subrs (gsubrs) is less than 1240, the bias is 107. 
Otherwise if it is less than 33900, it is 1131; otherwise it is 32768.", `Subroutine Operators <https://docs.microsoft.com/en-us/typography/opentype/otspec180/cff2charstr#section4.4>`) """ compilerClass = GlobalSubrsCompiler subrClass = psCharStrings.T2CharString charStringClass = psCharStrings.T2CharString def __init__( self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None, isCFF2=None, ): super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2) self.globalSubrs = globalSubrs self.private = private if fdSelect: self.fdSelect = fdSelect if fdArray: self.fdArray = fdArray def produceItem(self, index, data, file, offset): if self.private is not None: private = self.private elif hasattr(self, "fdArray") and self.fdArray is not None: if hasattr(self, "fdSelect") and self.fdSelect is not None: fdIndex = self.fdSelect[index] else: fdIndex = 0 private = self.fdArray[fdIndex].Private else: private = None return self.subrClass(data, private=private, globalSubrs=self.globalSubrs) def toXML(self, xmlWriter): """Write the subroutines index into XML representation onto the given :class:`fontTools.misc.xmlWriter.XMLWriter`. .. code:: python writer = xmlWriter.XMLWriter(sys.stdout) tt["CFF "].cff[0].GlobalSubrs.toXML(writer) """ xmlWriter.comment( "The 'index' attribute is only for humans; " "it is ignored when parsed." ) xmlWriter.newline() for i in range(len(self)): subr = self[i] if subr.needsDecompilation(): xmlWriter.begintag("CharString", index=i, raw=1) else: xmlWriter.begintag("CharString", index=i) xmlWriter.newline() subr.toXML(xmlWriter) xmlWriter.endtag("CharString") xmlWriter.newline() def fromXML(self, name, attrs, content): if name != "CharString": return subr = self.subrClass() subr.fromXML(name, attrs, content) self.append(subr) def getItemAndSelector(self, index): sel = None if hasattr(self, "fdSelect"): sel = self.fdSelect[index] return self[index], sel class SubrsIndex(GlobalSubrsIndex): """This index contains a glyph's local subroutines. A local subroutine is a private set of ``CharString`` data which is accessible only to the glyph to which the index is attached.""" compilerClass = SubrsCompiler class TopDictIndex(Index): """This index represents the array of ``TopDict`` structures in the font (again, usually only one entry is present). Hence the following calls are equivalent: .. 
code:: python tt["CFF "].cff[0] # <fontTools.cffLib.TopDict object at 0x102ed6e50> tt["CFF "].cff.topDictIndex[0] # <fontTools.cffLib.TopDict object at 0x102ed6e50> """ compilerClass = TopDictIndexCompiler def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, isCFF2=None): assert (isCFF2 is None) == (file is None) self.cff2GetGlyphOrder = cff2GetGlyphOrder if file is not None and isCFF2: self._isCFF2 = isCFF2 self.items = [] name = self.__class__.__name__ log.log(DEBUG, "loading %s at %s", name, file.tell()) self.file = file count = 1 self.items = [None] * count self.offsets = [0, topSize] self.offsetBase = file.tell() # pretend we've read the whole lot file.seek(self.offsetBase + topSize) log.log(DEBUG, " end of %s at %s", name, file.tell()) else: super(TopDictIndex, self).__init__(file, isCFF2=isCFF2) def produceItem(self, index, data, file, offset): top = TopDict( self.strings, file, offset, self.GlobalSubrs, self.cff2GetGlyphOrder, isCFF2=self._isCFF2, ) top.decompile(data) return top def toXML(self, xmlWriter): for i in range(len(self)): xmlWriter.begintag("FontDict", index=i) xmlWriter.newline() self[i].toXML(xmlWriter) xmlWriter.endtag("FontDict") xmlWriter.newline() class FDArrayIndex(Index): compilerClass = FDArrayIndexCompiler def toXML(self, xmlWriter): for i in range(len(self)): xmlWriter.begintag("FontDict", index=i) xmlWriter.newline() self[i].toXML(xmlWriter) xmlWriter.endtag("FontDict") xmlWriter.newline() def produceItem(self, index, data, file, offset): fontDict = FontDict( self.strings, file, offset, self.GlobalSubrs, isCFF2=self._isCFF2, vstore=self.vstore, ) fontDict.decompile(data) return fontDict def fromXML(self, name, attrs, content): if name != "FontDict": return fontDict = FontDict() for element in content: if isinstance(element, str): continue name, attrs, content = element fontDict.fromXML(name, attrs, content) self.append(fontDict) class VarStoreData(object): def __init__(self, file=None, otVarStore=None): self.file = file self.data = None self.otVarStore = otVarStore self.font = TTFont() # dummy font for the decompile function. def decompile(self): if self.file: # read data in from file. Assume position is correct. 
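# The CFF2 VariationStore begins with a 2-byte length field; the OpenType
# ItemVariationStore structure itself starts immediately after it.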
length = readCard16(self.file) self.data = self.file.read(length) globalState = {} reader = OTTableReader(self.data, globalState) self.otVarStore = ot.VarStore() self.otVarStore.decompile(reader, self.font) return self def compile(self): writer = OTTableWriter() self.otVarStore.compile(writer, self.font) # Note that this omits the initial Card16 length from the CFF2 # VarStore data block self.data = writer.getAllData() def writeXML(self, xmlWriter, name): self.otVarStore.toXML(xmlWriter, self.font) def xmlRead(self, name, attrs, content, parent): self.otVarStore = ot.VarStore() for element in content: if isinstance(element, tuple): name, attrs, content = element self.otVarStore.fromXML(name, attrs, content, self.font) else: pass return None def __len__(self): return len(self.data) def getNumRegions(self, vsIndex): if vsIndex is None: vsIndex = 0 varData = self.otVarStore.VarData[vsIndex] numRegions = varData.VarRegionCount return numRegions class FDSelect(object): def __init__(self, file=None, numGlyphs=None, format=None): if file: # read data in from file self.format = readCard8(file) if self.format == 0: from array import array self.gidArray = array("B", file.read(numGlyphs)).tolist() elif self.format == 3: gidArray = [None] * numGlyphs nRanges = readCard16(file) fd = None prev = None for i in range(nRanges): first = readCard16(file) if prev is not None: for glyphID in range(prev, first): gidArray[glyphID] = fd prev = first fd = readCard8(file) if prev is not None: first = readCard16(file) for glyphID in range(prev, first): gidArray[glyphID] = fd self.gidArray = gidArray elif self.format == 4: gidArray = [None] * numGlyphs nRanges = readCard32(file) fd = None prev = None for i in range(nRanges): first = readCard32(file) if prev is not None: for glyphID in range(prev, first): gidArray[glyphID] = fd prev = first fd = readCard16(file) if prev is not None: first = readCard32(file) for glyphID in range(prev, first): gidArray[glyphID] = fd self.gidArray = gidArray else: assert False, "unsupported FDSelect format: %s" % format else: # reading from XML. Make empty gidArray, and leave format as passed in. # format is None will result in the smallest representation being used. self.format = format self.gidArray = [] def __len__(self): return len(self.gidArray) def __getitem__(self, index): return self.gidArray[index] def __setitem__(self, index, fdSelectValue): self.gidArray[index] = fdSelectValue def append(self, fdSelectValue): self.gidArray.append(fdSelectValue) class CharStrings(object): """The ``CharStrings`` in the font represent the instructions for drawing each glyph. This object presents a dictionary interface to the font's CharStrings, indexed by glyph name: .. code:: python tt["CFF "].cff[0].CharStrings["a"] # <T2CharString (bytecode) at 103451e90> See :class:`fontTools.misc.psCharStrings.T1CharString` and :class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile, compile and interpret the glyph drawing instructions in the returned objects. """ def __init__( self, file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=None, varStore=None, ): self.globalSubrs = globalSubrs self.varStore = varStore if file is not None: self.charStringsIndex = SubrsIndex( file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2 ) self.charStrings = charStrings = {} for i in range(len(charset)): charStrings[charset[i]] = i # read from OTF file: charStrings.values() are indices into # charStringsIndex. 
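# e.g. if charset[0] == ".notdef", charStrings[".notdef"] == 0 and the
# bytecode itself lives at charStringsIndex[0].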
self.charStringsAreIndexed = 1 else: self.charStrings = {} # read from ttx file: charStrings.values() are actual charstrings self.charStringsAreIndexed = 0 self.private = private if fdSelect is not None: self.fdSelect = fdSelect if fdArray is not None: self.fdArray = fdArray def keys(self): return list(self.charStrings.keys()) def values(self): if self.charStringsAreIndexed: return self.charStringsIndex else: return list(self.charStrings.values()) def has_key(self, name): return name in self.charStrings __contains__ = has_key def __len__(self): return len(self.charStrings) def __getitem__(self, name): charString = self.charStrings[name] if self.charStringsAreIndexed: charString = self.charStringsIndex[charString] return charString def __setitem__(self, name, charString): if self.charStringsAreIndexed: index = self.charStrings[name] self.charStringsIndex[index] = charString else: self.charStrings[name] = charString def getItemAndSelector(self, name): if self.charStringsAreIndexed: index = self.charStrings[name] return self.charStringsIndex.getItemAndSelector(index) else: if hasattr(self, "fdArray"): if hasattr(self, "fdSelect"): sel = self.charStrings[name].fdSelectIndex else: sel = 0 else: sel = None return self.charStrings[name], sel def toXML(self, xmlWriter): names = sorted(self.keys()) for name in names: charStr, fdSelectIndex = self.getItemAndSelector(name) if charStr.needsDecompilation(): raw = [("raw", 1)] else: raw = [] if fdSelectIndex is None: xmlWriter.begintag("CharString", [("name", name)] + raw) else: xmlWriter.begintag( "CharString", [("name", name), ("fdSelectIndex", fdSelectIndex)] + raw, ) xmlWriter.newline() charStr.toXML(xmlWriter) xmlWriter.endtag("CharString") xmlWriter.newline() def fromXML(self, name, attrs, content): for element in content: if isinstance(element, str): continue name, attrs, content = element if name != "CharString": continue fdID = -1 if hasattr(self, "fdArray"): try: fdID = safeEval(attrs["fdSelectIndex"]) except KeyError: fdID = 0 private = self.fdArray[fdID].Private else: private = self.private glyphName = attrs["name"] charStringClass = psCharStrings.T2CharString charString = charStringClass(private=private, globalSubrs=self.globalSubrs) charString.fromXML(name, attrs, content) if fdID >= 0: charString.fdSelectIndex = fdID self[glyphName] = charString def readCard8(file): return byteord(file.read(1)) def readCard16(file): (value,) = struct.unpack(">H", file.read(2)) return value def readCard32(file): (value,) = struct.unpack(">L", file.read(4)) return value def writeCard8(file, value): file.write(bytechr(value)) def writeCard16(file, value): file.write(struct.pack(">H", value)) def writeCard32(file, value): file.write(struct.pack(">L", value)) def packCard8(value): return bytechr(value) def packCard16(value): return struct.pack(">H", value) def packCard32(value): return struct.pack(">L", value) def buildOperatorDict(table): d = {} for op, name, arg, default, conv in table: d[op] = (name, arg) return d def buildOpcodeDict(table): d = {} for op, name, arg, default, conv in table: if isinstance(op, tuple): op = bytechr(op[0]) + bytechr(op[1]) else: op = bytechr(op) d[name] = (op, arg) return d def buildOrder(table): l = [] for op, name, arg, default, conv in table: l.append(name) return l def buildDefaults(table): d = {} for op, name, arg, default, conv in table: if default is not None: d[name] = default return d def buildConverters(table): d = {} for op, name, arg, default, conv in table: d[name] = conv return d class SimpleConverter(object): def 
read(self, parent, value): if not hasattr(parent, "file"): return self._read(parent, value) file = parent.file pos = file.tell() try: return self._read(parent, value) finally: file.seek(pos) def _read(self, parent, value): return value def write(self, parent, value): return value def xmlWrite(self, xmlWriter, name, value): xmlWriter.simpletag(name, value=value) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): return attrs["value"] class ASCIIConverter(SimpleConverter): def _read(self, parent, value): return tostr(value, encoding="ascii") def write(self, parent, value): return tobytes(value, encoding="ascii") def xmlWrite(self, xmlWriter, name, value): xmlWriter.simpletag(name, value=tostr(value, encoding="ascii")) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): return tobytes(attrs["value"], encoding=("ascii")) class Latin1Converter(SimpleConverter): def _read(self, parent, value): return tostr(value, encoding="latin1") def write(self, parent, value): return tobytes(value, encoding="latin1") def xmlWrite(self, xmlWriter, name, value): value = tostr(value, encoding="latin1") if name in ["Notice", "Copyright"]: value = re.sub(r"[\r\n]\s+", " ", value) xmlWriter.simpletag(name, value=value) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): return tobytes(attrs["value"], encoding=("latin1")) def parseNum(s): try: value = int(s) except: value = float(s) return value def parseBlendList(s): valueList = [] for element in s: if isinstance(element, str): continue name, attrs, content = element blendList = attrs["value"].split() blendList = [eval(val) for val in blendList] valueList.append(blendList) if len(valueList) == 1: valueList = valueList[0] return valueList class NumberConverter(SimpleConverter): def xmlWrite(self, xmlWriter, name, value): if isinstance(value, list): xmlWriter.begintag(name) xmlWriter.newline() xmlWriter.indent() blendValue = " ".join([str(val) for val in value]) xmlWriter.simpletag(kBlendDictOpName, value=blendValue) xmlWriter.newline() xmlWriter.dedent() xmlWriter.endtag(name) xmlWriter.newline() else: xmlWriter.simpletag(name, value=value) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): valueString = attrs.get("value", None) if valueString is None: value = parseBlendList(content) else: value = parseNum(attrs["value"]) return value class ArrayConverter(SimpleConverter): def xmlWrite(self, xmlWriter, name, value): if value and isinstance(value[0], list): xmlWriter.begintag(name) xmlWriter.newline() xmlWriter.indent() for valueList in value: blendValue = " ".join([str(val) for val in valueList]) xmlWriter.simpletag(kBlendDictOpName, value=blendValue) xmlWriter.newline() xmlWriter.dedent() xmlWriter.endtag(name) xmlWriter.newline() else: value = " ".join([str(val) for val in value]) xmlWriter.simpletag(name, value=value) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): valueString = attrs.get("value", None) if valueString is None: valueList = parseBlendList(content) else: values = valueString.split() valueList = [parseNum(value) for value in values] return valueList class TableConverter(SimpleConverter): def xmlWrite(self, xmlWriter, name, value): xmlWriter.begintag(name) xmlWriter.newline() value.toXML(xmlWriter) xmlWriter.endtag(name) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): ob = self.getClass()() for element in content: if isinstance(element, str): continue name, attrs, content = element ob.fromXML(name, attrs, content) return ob class 
PrivateDictConverter(TableConverter): def getClass(self): return PrivateDict def _read(self, parent, value): size, offset = value file = parent.file isCFF2 = parent._isCFF2 try: vstore = parent.vstore except AttributeError: vstore = None priv = PrivateDict(parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore) file.seek(offset) data = file.read(size) assert len(data) == size priv.decompile(data) return priv def write(self, parent, value): return (0, 0) # dummy value class SubrsConverter(TableConverter): def getClass(self): return SubrsIndex def _read(self, parent, value): file = parent.file isCFF2 = parent._isCFF2 file.seek(parent.offset + value) # Offset(self) return SubrsIndex(file, isCFF2=isCFF2) def write(self, parent, value): return 0 # dummy value class CharStringsConverter(TableConverter): def _read(self, parent, value): file = parent.file isCFF2 = parent._isCFF2 charset = parent.charset varStore = getattr(parent, "VarStore", None) globalSubrs = parent.GlobalSubrs if hasattr(parent, "FDArray"): fdArray = parent.FDArray if hasattr(parent, "FDSelect"): fdSelect = parent.FDSelect else: fdSelect = None private = None else: fdSelect, fdArray = None, None private = parent.Private file.seek(value) # Offset(0) charStrings = CharStrings( file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2, varStore=varStore, ) return charStrings def write(self, parent, value): return 0 # dummy value def xmlRead(self, name, attrs, content, parent): if hasattr(parent, "FDArray"): # if it is a CID-keyed font, then the private Dict is extracted from the # parent.FDArray fdArray = parent.FDArray if hasattr(parent, "FDSelect"): fdSelect = parent.FDSelect else: fdSelect = None private = None else: # if it is a name-keyed font, then the private dict is in the top dict, # and # there is no fdArray. private, fdSelect, fdArray = parent.Private, None, None charStrings = CharStrings( None, None, parent.GlobalSubrs, private, fdSelect, fdArray, varStore=getattr(parent, "VarStore", None), ) charStrings.fromXML(name, attrs, content) return charStrings class CharsetConverter(SimpleConverter): def _read(self, parent, value): isCID = hasattr(parent, "ROS") if value > 2: numGlyphs = parent.numGlyphs file = parent.file file.seek(value) log.log(DEBUG, "loading charset at %s", value) format = readCard8(file) if format == 0: charset = parseCharset0(numGlyphs, file, parent.strings, isCID) elif format == 1 or format == 2: charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) else: raise NotImplementedError assert len(charset) == numGlyphs log.log(DEBUG, " charset end at %s", file.tell()) # make sure glyph names are unique allNames = {} newCharset = [] for glyphName in charset: if glyphName in allNames: # make up a new glyphName that's unique n = allNames[glyphName] while (glyphName + "#" + str(n)) in allNames: n += 1 allNames[glyphName] = n + 1 glyphName = glyphName + "#" + str(n) allNames[glyphName] = 1 newCharset.append(glyphName) charset = newCharset else: # offset == 0 -> no charset data. if isCID or "CharStrings" not in parent.rawDict: # We get here only when processing fontDicts from the FDArray of # CFF-CID fonts. Only the real topDict references the chrset. 
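# Charset values 0, 1 and 2 denote the predefined ISOAdobe, Expert and
# ExpertSubset charsets; only values > 2 are actual file offsets.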
assert value == 0 charset = None elif value == 0: charset = cffISOAdobeStrings elif value == 1: charset = cffIExpertStrings elif value == 2: charset = cffExpertSubsetStrings if charset and (len(charset) != parent.numGlyphs): charset = charset[: parent.numGlyphs] return charset def write(self, parent, value): return 0 # dummy value def xmlWrite(self, xmlWriter, name, value): # XXX only write charset when not in OT/TTX context, where we # dump charset as a separate "GlyphOrder" table. # # xmlWriter.simpletag("charset") xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): pass class CharsetCompiler(object): def __init__(self, strings, charset, parent): assert charset[0] == ".notdef" isCID = hasattr(parent.dictObj, "ROS") data0 = packCharset0(charset, isCID, strings) data = packCharset(charset, isCID, strings) if len(data) < len(data0): self.data = data else: self.data = data0 self.parent = parent def setPos(self, pos, endPos): self.parent.rawDict["charset"] = pos def getDataLength(self): return len(self.data) def toFile(self, file): file.write(self.data) def getStdCharSet(charset): # check to see if we can use a predefined charset value. predefinedCharSetVal = None predefinedCharSets = [ (cffISOAdobeStringCount, cffISOAdobeStrings, 0), (cffExpertStringCount, cffIExpertStrings, 1), (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2), ] lcs = len(charset) for cnt, pcs, csv in predefinedCharSets: if predefinedCharSetVal is not None: break if lcs > cnt: continue predefinedCharSetVal = csv for i in range(lcs): if charset[i] != pcs[i]: predefinedCharSetVal = None break return predefinedCharSetVal def getCIDfromName(name, strings): return int(name[3:]) def getSIDfromName(name, strings): return strings.getSID(name) def packCharset0(charset, isCID, strings): fmt = 0 data = [packCard8(fmt)] if isCID: getNameID = getCIDfromName else: getNameID = getSIDfromName for name in charset[1:]: data.append(packCard16(getNameID(name, strings))) return bytesjoin(data) def packCharset(charset, isCID, strings): fmt = 1 ranges = [] first = None end = 0 if isCID: getNameID = getCIDfromName else: getNameID = getSIDfromName for name in charset[1:]: SID = getNameID(name, strings) if first is None: first = SID elif end + 1 != SID: nLeft = end - first if nLeft > 255: fmt = 2 ranges.append((first, nLeft)) first = SID end = SID if end: nLeft = end - first if nLeft > 255: fmt = 2 ranges.append((first, nLeft)) data = [packCard8(fmt)] if fmt == 1: nLeftFunc = packCard8 else: nLeftFunc = packCard16 for first, nLeft in ranges: data.append(packCard16(first) + nLeftFunc(nLeft)) return bytesjoin(data) def parseCharset0(numGlyphs, file, strings, isCID): charset = [".notdef"] if isCID: for i in range(numGlyphs - 1): CID = readCard16(file) charset.append("cid" + str(CID).zfill(5)) else: for i in range(numGlyphs - 1): SID = readCard16(file) charset.append(strings[SID]) return charset def parseCharset(numGlyphs, file, strings, isCID, fmt): charset = [".notdef"] count = 1 if fmt == 1: nLeftFunc = readCard8 else: nLeftFunc = readCard16 while count < numGlyphs: first = readCard16(file) nLeft = nLeftFunc(file) if isCID: for CID in range(first, first + nLeft + 1): charset.append("cid" + str(CID).zfill(5)) else: for SID in range(first, first + nLeft + 1): charset.append(strings[SID]) count = count + nLeft + 1 return charset class EncodingCompiler(object): def __init__(self, strings, encoding, parent): assert not isinstance(encoding, str) data0 = 
packEncoding0(parent.dictObj.charset, encoding, parent.strings) data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) if len(data0) < len(data1): self.data = data0 else: self.data = data1 self.parent = parent def setPos(self, pos, endPos): self.parent.rawDict["Encoding"] = pos def getDataLength(self): return len(self.data) def toFile(self, file): file.write(self.data) class EncodingConverter(SimpleConverter): def _read(self, parent, value): if value == 0: return "StandardEncoding" elif value == 1: return "ExpertEncoding" else: assert value > 1 file = parent.file file.seek(value) log.log(DEBUG, "loading Encoding at %s", value) fmt = readCard8(file) haveSupplement = fmt & 0x80 if haveSupplement: raise NotImplementedError("Encoding supplements are not yet supported") fmt = fmt & 0x7F if fmt == 0: encoding = parseEncoding0( parent.charset, file, haveSupplement, parent.strings ) elif fmt == 1: encoding = parseEncoding1( parent.charset, file, haveSupplement, parent.strings ) return encoding def write(self, parent, value): if value == "StandardEncoding": return 0 elif value == "ExpertEncoding": return 1 return 0 # dummy value def xmlWrite(self, xmlWriter, name, value): if value in ("StandardEncoding", "ExpertEncoding"): xmlWriter.simpletag(name, name=value) xmlWriter.newline() return xmlWriter.begintag(name) xmlWriter.newline() for code in range(len(value)): glyphName = value[code] if glyphName != ".notdef": xmlWriter.simpletag("map", code=hex(code), name=glyphName) xmlWriter.newline() xmlWriter.endtag(name) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): if "name" in attrs: return attrs["name"] encoding = [".notdef"] * 256 for element in content: if isinstance(element, str): continue name, attrs, content = element code = safeEval(attrs["code"]) glyphName = attrs["name"] encoding[code] = glyphName return encoding def parseEncoding0(charset, file, haveSupplement, strings): nCodes = readCard8(file) encoding = [".notdef"] * 256 for glyphID in range(1, nCodes + 1): code = readCard8(file) if code != 0: encoding[code] = charset[glyphID] return encoding def parseEncoding1(charset, file, haveSupplement, strings): nRanges = readCard8(file) encoding = [".notdef"] * 256 glyphID = 1 for i in range(nRanges): code = readCard8(file) nLeft = readCard8(file) for glyphID in range(glyphID, glyphID + nLeft + 1): encoding[code] = charset[glyphID] code = code + 1 glyphID = glyphID + 1 return encoding def packEncoding0(charset, encoding, strings): fmt = 0 m = {} for code in range(len(encoding)): name = encoding[code] if name != ".notdef": m[name] = code codes = [] for name in charset[1:]: code = m.get(name) codes.append(code) while codes and codes[-1] is None: codes.pop() data = [packCard8(fmt), packCard8(len(codes))] for code in codes: if code is None: code = 0 data.append(packCard8(code)) return bytesjoin(data) def packEncoding1(charset, encoding, strings): fmt = 1 m = {} for code in range(len(encoding)): name = encoding[code] if name != ".notdef": m[name] = code ranges = [] first = None end = 0 for name in charset[1:]: code = m.get(name, -1) if first is None: first = code elif end + 1 != code: nLeft = end - first ranges.append((first, nLeft)) first = code end = code nLeft = end - first ranges.append((first, nLeft)) # remove unencoded glyphs at the end. 
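# (m.get(name, -1) above marked unencoded glyphs with code -1, so trailing
# ranges that start at -1 carry no information and can be dropped.)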
while ranges and ranges[-1][0] == -1: ranges.pop() data = [packCard8(fmt), packCard8(len(ranges))] for first, nLeft in ranges: if first == -1: # unencoded first = 0 data.append(packCard8(first) + packCard8(nLeft)) return bytesjoin(data) class FDArrayConverter(TableConverter): def _read(self, parent, value): try: vstore = parent.VarStore except AttributeError: vstore = None file = parent.file isCFF2 = parent._isCFF2 file.seek(value) fdArray = FDArrayIndex(file, isCFF2=isCFF2) fdArray.vstore = vstore fdArray.strings = parent.strings fdArray.GlobalSubrs = parent.GlobalSubrs return fdArray def write(self, parent, value): return 0 # dummy value def xmlRead(self, name, attrs, content, parent): fdArray = FDArrayIndex() for element in content: if isinstance(element, str): continue name, attrs, content = element fdArray.fromXML(name, attrs, content) return fdArray class FDSelectConverter(SimpleConverter): def _read(self, parent, value): file = parent.file file.seek(value) fdSelect = FDSelect(file, parent.numGlyphs) return fdSelect def write(self, parent, value): return 0 # dummy value # The FDSelect glyph data is written out to XML in the charstring keys, # so we write out only the format selector def xmlWrite(self, xmlWriter, name, value): xmlWriter.simpletag(name, [("format", value.format)]) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): fmt = safeEval(attrs["format"]) file = None numGlyphs = None fdSelect = FDSelect(file, numGlyphs, fmt) return fdSelect class VarStoreConverter(SimpleConverter): def _read(self, parent, value): file = parent.file file.seek(value) varStore = VarStoreData(file) varStore.decompile() return varStore def write(self, parent, value): return 0 # dummy value def xmlWrite(self, xmlWriter, name, value): value.writeXML(xmlWriter, name) def xmlRead(self, name, attrs, content, parent): varStore = VarStoreData() varStore.xmlRead(name, attrs, content, parent) return varStore def packFDSelect0(fdSelectArray): fmt = 0 data = [packCard8(fmt)] for index in fdSelectArray: data.append(packCard8(index)) return bytesjoin(data) def packFDSelect3(fdSelectArray): fmt = 3 fdRanges = [] lenArray = len(fdSelectArray) lastFDIndex = -1 for i in range(lenArray): fdIndex = fdSelectArray[i] if lastFDIndex != fdIndex: fdRanges.append([i, fdIndex]) lastFDIndex = fdIndex sentinelGID = i + 1 data = [packCard8(fmt)] data.append(packCard16(len(fdRanges))) for fdRange in fdRanges: data.append(packCard16(fdRange[0])) data.append(packCard8(fdRange[1])) data.append(packCard16(sentinelGID)) return bytesjoin(data) def packFDSelect4(fdSelectArray): fmt = 4 fdRanges = [] lenArray = len(fdSelectArray) lastFDIndex = -1 for i in range(lenArray): fdIndex = fdSelectArray[i] if lastFDIndex != fdIndex: fdRanges.append([i, fdIndex]) lastFDIndex = fdIndex sentinelGID = i + 1 data = [packCard8(fmt)] data.append(packCard32(len(fdRanges))) for fdRange in fdRanges: data.append(packCard32(fdRange[0])) data.append(packCard16(fdRange[1])) data.append(packCard32(sentinelGID)) return bytesjoin(data) class FDSelectCompiler(object): def __init__(self, fdSelect, parent): fmt = fdSelect.format fdSelectArray = fdSelect.gidArray if fmt == 0: self.data = packFDSelect0(fdSelectArray) elif fmt == 3: self.data = packFDSelect3(fdSelectArray) elif fmt == 4: self.data = packFDSelect4(fdSelectArray) else: # choose smaller of the two formats data0 = packFDSelect0(fdSelectArray) data3 = packFDSelect3(fdSelectArray) if len(data0) < len(data3): self.data = data0 fdSelect.format = 0 else: self.data = data3 fdSelect.format = 3 
self.parent = parent def setPos(self, pos, endPos): self.parent.rawDict["FDSelect"] = pos def getDataLength(self): return len(self.data) def toFile(self, file): file.write(self.data) class VarStoreCompiler(object): def __init__(self, varStoreData, parent): self.parent = parent if not varStoreData.data: varStoreData.compile() data = [packCard16(len(varStoreData.data)), varStoreData.data] self.data = bytesjoin(data) def setPos(self, pos, endPos): self.parent.rawDict["VarStore"] = pos def getDataLength(self): return len(self.data) def toFile(self, file): file.write(self.data) class ROSConverter(SimpleConverter): def xmlWrite(self, xmlWriter, name, value): registry, order, supplement = value xmlWriter.simpletag( name, [ ("Registry", tostr(registry)), ("Order", tostr(order)), ("Supplement", supplement), ], ) xmlWriter.newline() def xmlRead(self, name, attrs, content, parent): return (attrs["Registry"], attrs["Order"], safeEval(attrs["Supplement"])) topDictOperators = [ # opcode name argument type default converter (25, "maxstack", "number", None, None), ((12, 30), "ROS", ("SID", "SID", "number"), None, ROSConverter()), ((12, 20), "SyntheticBase", "number", None, None), (0, "version", "SID", None, None), (1, "Notice", "SID", None, Latin1Converter()), ((12, 0), "Copyright", "SID", None, Latin1Converter()), (2, "FullName", "SID", None, Latin1Converter()), ((12, 38), "FontName", "SID", None, Latin1Converter()), (3, "FamilyName", "SID", None, Latin1Converter()), (4, "Weight", "SID", None, None), ((12, 1), "isFixedPitch", "number", 0, None), ((12, 2), "ItalicAngle", "number", 0, None), ((12, 3), "UnderlinePosition", "number", -100, None), ((12, 4), "UnderlineThickness", "number", 50, None), ((12, 5), "PaintType", "number", 0, None), ((12, 6), "CharstringType", "number", 2, None), ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None), (13, "UniqueID", "number", None, None), (5, "FontBBox", "array", [0, 0, 0, 0], None), ((12, 8), "StrokeWidth", "number", 0, None), (14, "XUID", "array", None, None), ((12, 21), "PostScript", "SID", None, None), ((12, 22), "BaseFontName", "SID", None, None), ((12, 23), "BaseFontBlend", "delta", None, None), ((12, 31), "CIDFontVersion", "number", 0, None), ((12, 32), "CIDFontRevision", "number", 0, None), ((12, 33), "CIDFontType", "number", 0, None), ((12, 34), "CIDCount", "number", 8720, None), (15, "charset", "number", None, CharsetConverter()), ((12, 35), "UIDBase", "number", None, None), (16, "Encoding", "number", 0, EncodingConverter()), (18, "Private", ("number", "number"), None, PrivateDictConverter()), ((12, 37), "FDSelect", "number", None, FDSelectConverter()), ((12, 36), "FDArray", "number", None, FDArrayConverter()), (17, "CharStrings", "number", None, CharStringsConverter()), (24, "VarStore", "number", None, VarStoreConverter()), ] topDictOperators2 = [ # opcode name argument type default converter (25, "maxstack", "number", None, None), ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None), ((12, 37), "FDSelect", "number", None, FDSelectConverter()), ((12, 36), "FDArray", "number", None, FDArrayConverter()), (17, "CharStrings", "number", None, CharStringsConverter()), (24, "VarStore", "number", None, VarStoreConverter()), ] # Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, # in order for the font to compile back from xml. 
kBlendDictOpName = "blend" blendOp = 23 privateDictOperators = [ # opcode name argument type default converter (22, "vsindex", "number", None, None), ( blendOp, kBlendDictOpName, "blendList", None, None, ), # This is for reading to/from XML: it not written to CFF. (6, "BlueValues", "delta", None, None), (7, "OtherBlues", "delta", None, None), (8, "FamilyBlues", "delta", None, None), (9, "FamilyOtherBlues", "delta", None, None), ((12, 9), "BlueScale", "number", 0.039625, None), ((12, 10), "BlueShift", "number", 7, None), ((12, 11), "BlueFuzz", "number", 1, None), (10, "StdHW", "number", None, None), (11, "StdVW", "number", None, None), ((12, 12), "StemSnapH", "delta", None, None), ((12, 13), "StemSnapV", "delta", None, None), ((12, 14), "ForceBold", "number", 0, None), ((12, 15), "ForceBoldThreshold", "number", None, None), # deprecated ((12, 16), "lenIV", "number", None, None), # deprecated ((12, 17), "LanguageGroup", "number", 0, None), ((12, 18), "ExpansionFactor", "number", 0.06, None), ((12, 19), "initialRandomSeed", "number", 0, None), (20, "defaultWidthX", "number", 0, None), (21, "nominalWidthX", "number", 0, None), (19, "Subrs", "number", None, SubrsConverter()), ] privateDictOperators2 = [ # opcode name argument type default converter (22, "vsindex", "number", None, None), ( blendOp, kBlendDictOpName, "blendList", None, None, ), # This is for reading to/from XML: it not written to CFF. (6, "BlueValues", "delta", None, None), (7, "OtherBlues", "delta", None, None), (8, "FamilyBlues", "delta", None, None), (9, "FamilyOtherBlues", "delta", None, None), ((12, 9), "BlueScale", "number", 0.039625, None), ((12, 10), "BlueShift", "number", 7, None), ((12, 11), "BlueFuzz", "number", 1, None), (10, "StdHW", "number", None, None), (11, "StdVW", "number", None, None), ((12, 12), "StemSnapH", "delta", None, None), ((12, 13), "StemSnapV", "delta", None, None), ((12, 17), "LanguageGroup", "number", 0, None), ((12, 18), "ExpansionFactor", "number", 0.06, None), (19, "Subrs", "number", None, SubrsConverter()), ] def addConverters(table): for i in range(len(table)): op, name, arg, default, conv = table[i] if conv is not None: continue if arg in ("delta", "array"): conv = ArrayConverter() elif arg == "number": conv = NumberConverter() elif arg == "SID": conv = ASCIIConverter() elif arg == "blendList": conv = None else: assert False table[i] = op, name, arg, default, conv addConverters(privateDictOperators) addConverters(topDictOperators) class TopDictDecompiler(psCharStrings.DictDecompiler): operators = buildOperatorDict(topDictOperators) class PrivateDictDecompiler(psCharStrings.DictDecompiler): operators = buildOperatorDict(privateDictOperators) class DictCompiler(object): maxBlendStack = 0 def __init__(self, dictObj, strings, parent, isCFF2=None): if strings: assert isinstance(strings, IndexedStrings) if isCFF2 is None and hasattr(parent, "isCFF2"): isCFF2 = parent.isCFF2 assert isCFF2 is not None self.isCFF2 = isCFF2 self.dictObj = dictObj self.strings = strings self.parent = parent rawDict = {} for name in dictObj.order: value = getattr(dictObj, name, None) if value is None: continue conv = dictObj.converters[name] value = conv.write(dictObj, value) if value == dictObj.defaults.get(name): continue rawDict[name] = value self.rawDict = rawDict def setPos(self, pos, endPos): pass def getDataLength(self): return len(self.compile("getDataLength")) def compile(self, reason): log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason) rawDict = self.rawDict data = [] for name in 
self.dictObj.order: value = rawDict.get(name) if value is None: continue op, argType = self.opcodes[name] if isinstance(argType, tuple): l = len(argType) assert len(value) == l, "value doesn't match arg type" for i in range(l): arg = argType[i] v = value[i] arghandler = getattr(self, "arg_" + arg) data.append(arghandler(v)) else: arghandler = getattr(self, "arg_" + argType) data.append(arghandler(value)) data.append(op) data = bytesjoin(data) return data def toFile(self, file): data = self.compile("toFile") file.write(data) def arg_number(self, num): if isinstance(num, list): data = [encodeNumber(val) for val in num] data.append(encodeNumber(1)) data.append(bytechr(blendOp)) datum = bytesjoin(data) else: datum = encodeNumber(num) return datum def arg_SID(self, s): return psCharStrings.encodeIntCFF(self.strings.getSID(s)) def arg_array(self, value): data = [] for num in value: data.append(self.arg_number(num)) return bytesjoin(data) def arg_delta(self, value): if not value: return b"" val0 = value[0] if isinstance(val0, list): data = self.arg_delta_blend(value) else: out = [] last = 0 for v in value: out.append(v - last) last = v data = [] for num in out: data.append(encodeNumber(num)) return bytesjoin(data) def arg_delta_blend(self, value): """A delta list with blend lists has to be *all* blend lists. The value is a list arranged as follows:: [ [V0, d0..dn] [V1, d0..dn] ... [Vm, d0..dn] ] ``V`` is the absolute coordinate value from the default font, and ``d0-dn`` are the delta values from the *n* regions. We want to return a list:: [ [v0, v1..vm] [d0..dn] ... [d0..dn] numBlends blendOp ] where each ``v`` is relative to the previous default font value. """ numMasters = len(value[0]) numBlends = len(value) numStack = (numBlends * numMasters) + 1 if numStack > self.maxBlendStack: # Figure out the max number of values we can blend # and divide this list up into chunks of that size. numBlendValues = int((self.maxBlendStack - 1) / numMasters) out = [] while True: numVal = min(len(value), numBlendValues) if numVal == 0: break valList = value[0:numVal] out1 = self.arg_delta_blend(valList) out.extend(out1) value = value[numVal:] else: firstList = [0] * numBlends deltaList = [None] * numBlends i = 0 prevVal = 0 while i < numBlends: # For PrivateDict BlueValues, the default font # values are absolute, not relative. # Must convert these back to relative coordinates # before writing to CFF2.
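# Worked example with hypothetical values: for two BlueValues blend
# lists over two regions,
#     value = [[500, 10, 20], [520, 30, 40]]
# the defaults 500 and 520 are absolute, so firstList becomes
#     [500, 520 - 500] == [500, 20]
# and the encoded operand run is
#     500 20 10 20 30 40 2 blend
# (relative defaults first, then each value's region deltas, then
# numBlends, then the blend operator).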
defaultValue = value[i][0] firstList[i] = defaultValue - prevVal prevVal = defaultValue deltaList[i] = value[i][1:] i += 1 relValueList = firstList for blendList in deltaList: relValueList.extend(blendList) out = [encodeNumber(val) for val in relValueList] out.append(encodeNumber(numBlends)) out.append(bytechr(blendOp)) return out def encodeNumber(num): if isinstance(num, float): return psCharStrings.encodeFloat(num) else: return psCharStrings.encodeIntCFF(num) class TopDictCompiler(DictCompiler): opcodes = buildOpcodeDict(topDictOperators) def getChildren(self, strings): isCFF2 = self.isCFF2 children = [] if self.dictObj.cff2GetGlyphOrder is None: if hasattr(self.dictObj, "charset") and self.dictObj.charset: if hasattr(self.dictObj, "ROS"): # aka isCID charsetCode = None else: charsetCode = getStdCharSet(self.dictObj.charset) if charsetCode is None: children.append( CharsetCompiler(strings, self.dictObj.charset, self) ) else: self.rawDict["charset"] = charsetCode if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding: encoding = self.dictObj.Encoding if not isinstance(encoding, str): children.append(EncodingCompiler(strings, encoding, self)) else: if hasattr(self.dictObj, "VarStore"): varStoreData = self.dictObj.VarStore varStoreComp = VarStoreCompiler(varStoreData, self) children.append(varStoreComp) if hasattr(self.dictObj, "FDSelect"): # I have not yet supported merging a ttx CFF-CID font, as there are # interesting issues about merging the FDArrays. Here I assume that # either the font was read from XML, and the FDSelect indices are all # in the charstring data, or the FDSelect array is already fully defined. fdSelect = self.dictObj.FDSelect # probably read in from XML; assume fdIndex in CharString data if len(fdSelect) == 0: charStrings = self.dictObj.CharStrings for name in self.dictObj.charset: fdSelect.append(charStrings[name].fdSelectIndex) fdSelectComp = FDSelectCompiler(fdSelect, self) children.append(fdSelectComp) if hasattr(self.dictObj, "CharStrings"): items = [] charStrings = self.dictObj.CharStrings for name in self.dictObj.charset: items.append(charStrings[name]) charStringsComp = CharStringsCompiler(items, strings, self, isCFF2=isCFF2) children.append(charStringsComp) if hasattr(self.dictObj, "FDArray"): # I have not yet supported merging a ttx CFF-CID font, as there are # interesting issues about merging the FDArrays. Here I assume that the # FDArray info is correct and complete. fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) children.append(fdArrayIndexComp) children.extend(fdArrayIndexComp.getChildren(strings)) if hasattr(self.dictObj, "Private"): privComp = self.dictObj.Private.getCompiler(strings, self) children.append(privComp) children.extend(privComp.getChildren(strings)) return children class FontDictCompiler(DictCompiler): opcodes = buildOpcodeDict(topDictOperators) def __init__(self, dictObj, strings, parent, isCFF2=None): super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2) # # We now take some effort to detect if there were any key/value pairs # supplied that were ignored in the FontDict context, and issue a warning # for those cases. # ignoredNames = [] dictObj = self.dictObj for name in sorted(set(dictObj.converters) - set(dictObj.order)): if name in dictObj.rawDict: # The font was directly read from binary. In this # case, we want to report *all* "useless" key/value # pairs that are in the font, not just the ones that # are different from the default. 
ignoredNames.append(name) else: # The font was probably read from a TTX file. We only # warn about keys whose value is not the default. The # ones that have the default value will not be written # to binary anyway. default = dictObj.defaults.get(name) if default is not None: conv = dictObj.converters[name] default = conv.read(dictObj, default) if getattr(dictObj, name, None) != default: ignoredNames.append(name) if ignoredNames: log.warning( "Some CFF FDArray/FontDict keys were ignored upon compile: " + " ".join(sorted(ignoredNames)) ) def getChildren(self, strings): children = [] if hasattr(self.dictObj, "Private"): privComp = self.dictObj.Private.getCompiler(strings, self) children.append(privComp) children.extend(privComp.getChildren(strings)) return children class PrivateDictCompiler(DictCompiler): maxBlendStack = maxStackLimit opcodes = buildOpcodeDict(privateDictOperators) def setPos(self, pos, endPos): size = endPos - pos self.parent.rawDict["Private"] = size, pos self.pos = pos def getChildren(self, strings): children = [] if hasattr(self.dictObj, "Subrs"): children.append(self.dictObj.Subrs.getCompiler(strings, self)) return children class BaseDict(object): def __init__(self, strings=None, file=None, offset=None, isCFF2=None): assert (isCFF2 is None) == (file is None) self.rawDict = {} self.skipNames = [] self.strings = strings if file is None: return self._isCFF2 = isCFF2 self.file = file if offset is not None: log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset) self.offset = offset def decompile(self, data): log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data)) dec = self.decompilerClass(self.strings, self) dec.decompile(data) self.rawDict = dec.getDict() self.postDecompile() def postDecompile(self): pass def getCompiler(self, strings, parent, isCFF2=None): return self.compilerClass(self, strings, parent, isCFF2=isCFF2) def __getattr__(self, name): if name[:2] == name[-2:] == "__": # to make deepcopy() and pickle.load() work, we need to signal with # AttributeError that dunder methods like '__deepcopy__' or '__getstate__' # aren't implemented. For more details, see: # https://github.com/fonttools/fonttools/pull/1488 raise AttributeError(name) value = self.rawDict.get(name, None) if value is None: value = self.defaults.get(name) if value is None: raise AttributeError(name) conv = self.converters[name] value = conv.read(self, value) setattr(self, name, value) return value def toXML(self, xmlWriter): for name in self.order: if name in self.skipNames: continue value = getattr(self, name, None) # XXX For "charset" we never skip calling xmlWrite even if the # value is None, so we always write the following XML comment: # # <!-- charset is dumped separately as the 'GlyphOrder' element --> # # Charset is None when 'CFF ' table is imported from XML into an # empty TTFont(). By writing this comment all the time, we obtain # the same XML output whether roundtripping XML-to-XML or # dumping binary-to-XML. if value is None and name != "charset": continue conv = self.converters[name] conv.xmlWrite(xmlWriter, name, value) ignoredNames = set(self.rawDict) - set(self.order) if ignoredNames: xmlWriter.comment( "some keys were ignored: %s" % " ".join(sorted(ignoredNames)) ) xmlWriter.newline() def fromXML(self, name, attrs, content): conv = self.converters[name] value = conv.xmlRead(name, attrs, content, self) setattr(self, name, value) class TopDict(BaseDict): """The ``TopDict`` represents the top-level dictionary holding font information.
CFF2 tables contain a restricted set of top-level entries as described `here <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#7-top-dict-data>`_, but CFF tables may contain a wider range of information. This information can be accessed through attributes or through the dictionary returned through the ``rawDict`` property: .. code:: python font = tt["CFF "].cff[0] font.FamilyName # 'Linux Libertine O' font.rawDict["FamilyName"] # 'Linux Libertine O' More information is available in the CFF file's private dictionary, accessed via the ``Private`` property: .. code:: python tt["CFF "].cff[0].Private.BlueValues # [-15, 0, 515, 515, 666, 666] """ defaults = buildDefaults(topDictOperators) converters = buildConverters(topDictOperators) compilerClass = TopDictCompiler order = buildOrder(topDictOperators) decompilerClass = TopDictDecompiler def __init__( self, strings=None, file=None, offset=None, GlobalSubrs=None, cff2GetGlyphOrder=None, isCFF2=None, ): super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2) self.cff2GetGlyphOrder = cff2GetGlyphOrder self.GlobalSubrs = GlobalSubrs if isCFF2: self.defaults = buildDefaults(topDictOperators2) self.charset = cff2GetGlyphOrder() self.order = buildOrder(topDictOperators2) else: self.defaults = buildDefaults(topDictOperators) self.order = buildOrder(topDictOperators) def getGlyphOrder(self): """Returns a list of glyph names in the CFF font.""" return self.charset def postDecompile(self): offset = self.rawDict.get("CharStrings") if offset is None: return # get the number of glyphs beforehand. self.file.seek(offset) if self._isCFF2: self.numGlyphs = readCard32(self.file) else: self.numGlyphs = readCard16(self.file) def toXML(self, xmlWriter): if hasattr(self, "CharStrings"): self.decompileAllCharStrings() if hasattr(self, "ROS"): self.skipNames = ["Encoding"] if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): # these values have default values, but I only want them to show up # in CID fonts. self.skipNames = [ "CIDFontVersion", "CIDFontRevision", "CIDFontType", "CIDCount", ] BaseDict.toXML(self, xmlWriter) def decompileAllCharStrings(self): # Make sure that all the Private Dicts have been instantiated. for i, charString in enumerate(self.CharStrings.values()): try: charString.decompile() except: log.error("Error in charstring %s", i) raise def recalcFontBBox(self): fontBBox = None for charString in self.CharStrings.values(): bounds = charString.calcBounds(self.CharStrings) if bounds is not None: if fontBBox is not None: fontBBox = unionRect(fontBBox, bounds) else: fontBBox = bounds if fontBBox is None: self.FontBBox = self.defaults["FontBBox"][:] else: self.FontBBox = list(intRect(fontBBox)) class FontDict(BaseDict): # # Since fonttools used to pass a lot of fields that are not relevant in the FDArray # FontDict, there are 'ttx' files in the wild that contain all these. These got in # the ttx files because fonttools writes explicit values for all the TopDict default # values. These are not actually illegal in the context of an FDArray FontDict - you # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are # useless since current major company CFF interpreters ignore anything but the set # listed in this file. So, we just silently skip them. An exception is Weight: this # is not used by any interpreter, but some foundries have asked that this be # supported in FDArray FontDicts just to preserve information about the design when # the font is being inspected. 
# # On top of that, there are fonts out there that contain such useless FontDict values. # # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading # from binary or when reading from XML, but by overriding `order` with a limited # list of names, we ensure that only the useful names ever get exported to XML and # ever get compiled into the binary font. # # We override compilerClass so we can warn about "useless" key/value pairs, either # from the original binary font or from TTX input. # # See: # - https://github.com/fonttools/fonttools/issues/740 # - https://github.com/fonttools/fonttools/issues/601 # - https://github.com/adobe-type-tools/afdko/issues/137 # defaults = {} converters = buildConverters(topDictOperators) compilerClass = FontDictCompiler orderCFF = ["FontName", "FontMatrix", "Weight", "Private"] orderCFF2 = ["Private"] decompilerClass = TopDictDecompiler def __init__( self, strings=None, file=None, offset=None, GlobalSubrs=None, isCFF2=None, vstore=None, ): super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2) self.vstore = vstore self.setCFF2(isCFF2) def setCFF2(self, isCFF2): # isCFF2 may be None. if isCFF2: self.order = self.orderCFF2 self._isCFF2 = True else: self.order = self.orderCFF self._isCFF2 = False class PrivateDict(BaseDict): defaults = buildDefaults(privateDictOperators) converters = buildConverters(privateDictOperators) order = buildOrder(privateDictOperators) decompilerClass = PrivateDictDecompiler compilerClass = PrivateDictCompiler def __init__(self, strings=None, file=None, offset=None, isCFF2=None, vstore=None): super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2) self.vstore = vstore if isCFF2: self.defaults = buildDefaults(privateDictOperators2) self.order = buildOrder(privateDictOperators2) # Provide dummy values. This avoids needing to provide # an isCFF2 state in a lot of places. self.nominalWidthX = self.defaultWidthX = None else: self.defaults = buildDefaults(privateDictOperators) self.order = buildOrder(privateDictOperators) @property def in_cff2(self): return self._isCFF2 def getNumRegions(self, vi=None): # called from misc/psCharStrings.py # if getNumRegions is being called, we can assume that VarStore exists. if vi is None: if hasattr(self, "vsindex"): vi = self.vsindex else: vi = 0 numRegions = self.vstore.getNumRegions(vi) return numRegions class IndexedStrings(object): """SID -> string mapping.""" def __init__(self, file=None): if file is None: strings = [] else: strings = [tostr(s, encoding="latin1") for s in Index(file, isCFF2=False)] self.strings = strings def getCompiler(self): return IndexedStringsCompiler(self, None, self, isCFF2=False) def __len__(self): return len(self.strings) def __getitem__(self, SID): if SID < cffStandardStringCount: return cffStandardStrings[SID] else: return self.strings[SID - cffStandardStringCount] def getSID(self, s): if not hasattr(self, "stringMapping"): self.buildStringMapping() s = tostr(s, encoding="latin1") if s in cffStandardStringMapping: SID = cffStandardStringMapping[s] elif s in self.stringMapping: SID = self.stringMapping[s] else: SID = len(self.strings) + cffStandardStringCount self.strings.append(s) self.stringMapping[s] = SID return SID def getStrings(self): return self.strings def buildStringMapping(self): self.stringMapping = {} for index in range(len(self.strings)): self.stringMapping[self.strings[index]] = index + cffStandardStringCount # The 391 Standard Strings as used in the CFF format. 
# from Adobe Technical Note #5176, version 1.0, 18 March 1998 cffStandardStrings = [ ".notdef", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", "less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", "sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", "endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", "perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", "macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", "ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", "onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", "Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", "threequarters", "twosuperior", "registered", "minus", "eth", "multiply", "threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", "acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", "igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", "scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall", "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", "parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", "zerooldstyle", "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", "asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", "tsuperior", "ff", "ffi", "ffl", "parenleftinferior", "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", "Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", "Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall",
"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", "questiondownsmall", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", "zerosuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", "centinferior", "dollarinferior", "periodinferior", "commainferior", "Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", "Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", "Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall", "001.000", "001.001", "001.002", "001.003", "Black", "Bold", "Book", "Light", "Medium", "Regular", "Roman", "Semibold", ] cffStandardStringCount = 391 assert len(cffStandardStrings) == cffStandardStringCount # build reverse mapping cffStandardStringMapping = {} for _i in range(cffStandardStringCount): cffStandardStringMapping[cffStandardStrings[_i]] = _i cffISOAdobeStrings = [ ".notdef", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", "less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", "sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", "endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", "perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", "macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", "ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", "onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", "Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", "threequarters", "twosuperior", "registered", "minus", "eth", "multiply", "threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", "acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", "igrave", "ntilde", 
"oacute", "ocircumflex", "odieresis", "ograve", "otilde", "scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", "zcaron", ] cffISOAdobeStringCount = 229 assert len(cffISOAdobeStrings) == cffISOAdobeStringCount cffIExpertStrings = [ ".notdef", "space", "exclamsmall", "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", "parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", "commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", "asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", "tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", "Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", "Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", "onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", "centinferior", "dollarinferior", "periodinferior", "commainferior", "Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", "Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", "Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall", ] cffExpertStringCount = 166 assert len(cffIExpertStrings) == cffExpertStringCount cffExpertSubsetStrings = [ ".notdef", "space", "dollaroldstyle", "dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", "commasuperior", "threequartersemdash", "periodsuperior", "asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", "tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", "parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", "centoldstyle", "figuredash", "hypheninferior", "onequarter", 
"onehalf", "threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", "centinferior", "dollarinferior", "periodinferior", "commainferior", ] cffExpertSubsetStringCount = 87 assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount PKaZZZߟ'w�w�wfontTools/cffLib/specializer.py# -*- coding: utf-8 -*- """T2CharString operator specializer and generalizer. PostScript glyph drawing operations can be expressed in multiple different ways. For example, as well as the ``lineto`` operator, there is also a ``hlineto`` operator which draws a horizontal line, removing the need to specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a vertical line, removing the need to specify a ``dy`` coordinate. As well as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects into lists of operations, this module allows for conversion between general and specific forms of the operation. """ from fontTools.cffLib import maxStackLimit def stringToProgram(string): if isinstance(string, str): string = string.split() program = [] for token in string: try: token = int(token) except ValueError: try: token = float(token) except ValueError: pass program.append(token) return program def programToString(program): return " ".join(str(x) for x in program) def programToCommands(program, getNumRegions=None): """Takes a T2CharString program list and returns list of commands. Each command is a two-tuple of commandname,arg-list. The commandname might be empty string if no commandname shall be emitted (used for glyph width, hintmask/cntrmask argument, as well as stray arguments at the end of the program (🤷). 'getNumRegions' may be None, or a callable object. It must return the number of regions. 'getNumRegions' takes a single argument, vsindex. If the vsindex argument is None, getNumRegions returns the default number of regions for the charstring, else it returns the numRegions for the vsindex. The Charstring may or may not start with a width value. If the first non-blend operator has an odd number of arguments, then the first argument is a width, and is popped off. This is complicated with blend operators, as there may be more than one before the first hint or moveto operator, and each one reduces several arguments to just one list argument. We have to sum the number of arguments that are not part of the blend arguments, and all the 'numBlends' values. We could instead have said that by definition, if there is a blend operator, there is no width value, since CFF2 Charstrings don't have width values. I discussed this with Behdad, and we are allowing for an initial width value in this case because developers may assemble a CFF2 charstring from CFF Charstrings, which could have width values. """ seenWidthOp = False vsIndex = None lenBlendStack = 0 lastBlendIndex = 0 commands = [] stack = [] it = iter(program) for token in it: if not isinstance(token, str): stack.append(token) continue if token == "blend": assert getNumRegions is not None numSourceFonts = 1 + getNumRegions(vsIndex) # replace the blend op args on the stack with a single list # containing all the blend op args. 
numBlends = stack[-1] numBlendArgs = numBlends * numSourceFonts + 1 # replace first blend op by a list of the blend ops. stack[-numBlendArgs:] = [stack[-numBlendArgs:]] lenBlendStack += numBlends + len(stack) - 1 lastBlendIndex = len(stack) # if a blend op exists, this is or will be a CFF2 charstring. continue elif token == "vsindex": vsIndex = stack[-1] assert type(vsIndex) is int elif (not seenWidthOp) and token in { "hstem", "hstemhm", "vstem", "vstemhm", "cntrmask", "hintmask", "hmoveto", "vmoveto", "rmoveto", "endchar", }: seenWidthOp = True parity = token in {"hmoveto", "vmoveto"} if lenBlendStack: # lenBlendStack has the number of args represented by the last blend # arg and all the preceding args. We need to now add the number of # args following the last blend arg. numArgs = lenBlendStack + len(stack[lastBlendIndex:]) else: numArgs = len(stack) if numArgs and (numArgs % 2) ^ parity: width = stack.pop(0) commands.append(("", [width])) if token in {"hintmask", "cntrmask"}: if stack: commands.append(("", stack)) commands.append((token, [])) commands.append(("", [next(it)])) else: commands.append((token, stack)) stack = [] if stack: commands.append(("", stack)) return commands def _flattenBlendArgs(args): token_list = [] for arg in args: if isinstance(arg, list): token_list.extend(arg) token_list.append("blend") else: token_list.append(arg) return token_list def commandsToProgram(commands): """Takes a commands list as returned by programToCommands() and converts it back to a T2CharString program list.""" program = [] for op, args in commands: if any(isinstance(arg, list) for arg in args): args = _flattenBlendArgs(args) program.extend(args) if op: program.append(op) return program def _everyN(el, n): """Group the list el into groups of size n""" if len(el) % n != 0: raise ValueError(el) for i in range(0, len(el), n): yield el[i : i + n] class _GeneralizerDecombinerCommandsMap(object): @staticmethod def rmoveto(args): if len(args) != 2: raise ValueError(args) yield ("rmoveto", args) @staticmethod def hmoveto(args): if len(args) != 1: raise ValueError(args) yield ("rmoveto", [args[0], 0]) @staticmethod def vmoveto(args): if len(args) != 1: raise ValueError(args) yield ("rmoveto", [0, args[0]]) @staticmethod def rlineto(args): if not args: raise ValueError(args) for args in _everyN(args, 2): yield ("rlineto", args) @staticmethod def hlineto(args): if not args: raise ValueError(args) it = iter(args) try: while True: yield ("rlineto", [next(it), 0]) yield ("rlineto", [0, next(it)]) except StopIteration: pass @staticmethod def vlineto(args): if not args: raise ValueError(args) it = iter(args) try: while True: yield ("rlineto", [0, next(it)]) yield ("rlineto", [next(it), 0]) except StopIteration: pass @staticmethod def rrcurveto(args): if not args: raise ValueError(args) for args in _everyN(args, 6): yield ("rrcurveto", args) @staticmethod def hhcurveto(args): if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args) if len(args) % 2 == 1: yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0]) args = args[5:] for args in _everyN(args, 4): yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0]) @staticmethod def vvcurveto(args): if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args) if len(args) % 2 == 1: yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]]) args = args[5:] for args in _everyN(args, 4): yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]]) @staticmethod def hvcurveto(args): if len(args) < 4 or len(args) % 8 not in {0, 
1, 4, 5}: raise ValueError(args) last_args = None if len(args) % 2 == 1: lastStraight = len(args) % 8 == 5 args, last_args = args[:-5], args[-5:] it = _everyN(args, 4) try: while True: args = next(it) yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]]) args = next(it) yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0]) except StopIteration: pass if last_args: args = last_args if lastStraight: yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]]) else: yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]]) @staticmethod def vhcurveto(args): if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}: raise ValueError(args) last_args = None if len(args) % 2 == 1: lastStraight = len(args) % 8 == 5 args, last_args = args[:-5], args[-5:] it = _everyN(args, 4) try: while True: args = next(it) yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0]) args = next(it) yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]]) except StopIteration: pass if last_args: args = last_args if lastStraight: yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]]) else: yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]]) @staticmethod def rcurveline(args): if len(args) < 8 or len(args) % 6 != 2: raise ValueError(args) args, last_args = args[:-2], args[-2:] for args in _everyN(args, 6): yield ("rrcurveto", args) yield ("rlineto", last_args) @staticmethod def rlinecurve(args): if len(args) < 8 or len(args) % 2 != 0: raise ValueError(args) args, last_args = args[:-6], args[-6:] for args in _everyN(args, 2): yield ("rlineto", args) yield ("rrcurveto", last_args) def _convertBlendOpToArgs(blendList): # args is list of blend op args. Since we are supporting # recursive blend op calls, some of these args may also # be a list of blend op args, and need to be converted before # we convert the current list. if any([isinstance(arg, list) for arg in blendList]): args = [ i for e in blendList for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e]) ] else: args = blendList # We now know that blendList contains a blend op argument list, even if # some of the args are lists that each contain a blend op argument list. # Convert from: # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn] # to: # [ [x0] + [delta tuple for x0], # ..., # [xn] + [delta tuple for xn] ] numBlends = args[-1] # Can't use args.pop() when the args are being used in a nested list # comprehension. See calling context args = args[:-1] numRegions = len(args) // numBlends - 1 if not (numBlends * (numRegions + 1) == len(args)): raise ValueError(blendList) defaultArgs = [[arg] for arg in args[:numBlends]] deltaArgs = args[numBlends:] numDeltaValues = len(deltaArgs) deltaList = [ deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions) ] blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)] return blend_args def generalizeCommands(commands, ignoreErrors=False): result = [] mapping = _GeneralizerDecombinerCommandsMap for op, args in commands: # First, generalize any blend args in the arg list. if any([isinstance(arg, list) for arg in args]): try: args = [ n for arg in args for n in ( _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg] ) ] except ValueError: if ignoreErrors: # Store op as data, such that consumers of commands do not have to # deal with incorrect number of arguments. 
result.append(("", args)) result.append(("", [op])) else: raise func = getattr(mapping, op, None) if not func: result.append((op, args)) continue try: for command in func(args): result.append(command) except ValueError: if ignoreErrors: # Store op as data, such that consumers of commands do not have to # deal with incorrect number of arguments. result.append(("", args)) result.append(("", [op])) else: raise return result def generalizeProgram(program, getNumRegions=None, **kwargs): return commandsToProgram( generalizeCommands(programToCommands(program, getNumRegions), **kwargs) ) def _categorizeVector(v): """ Takes X,Y vector v and returns one of r, h, v, or 0 depending on which of X and/or Y are zero, plus tuple of nonzero ones. If both are zero, it returns a single zero still. >>> _categorizeVector((0,0)) ('0', (0,)) >>> _categorizeVector((1,0)) ('h', (1,)) >>> _categorizeVector((0,2)) ('v', (2,)) >>> _categorizeVector((1,2)) ('r', (1, 2)) """ if not v[0]: if not v[1]: return "0", v[:1] else: return "v", v[1:] else: if not v[1]: return "h", v[:1] else: return "r", v def _mergeCategories(a, b): if a == "0": return b if b == "0": return a if a == b: return a return None def _negateCategory(a): if a == "h": return "v" if a == "v": return "h" assert a in "0r" return a def _convertToBlendCmds(args): # return a list of blend commands, and # the remaining non-blended args, if any. num_args = len(args) stack_use = 0 new_args = [] i = 0 while i < num_args: arg = args[i] if not isinstance(arg, list): new_args.append(arg) i += 1 stack_use += 1 else: prev_stack_use = stack_use # The arg is a tuple of blend values. # These are each (master 0,delta 1..delta n, 1) # Combine as many successive tuples as we can, # up to the max stack limit. num_sources = len(arg) - 1 blendlist = [arg] i += 1 stack_use += 1 + num_sources # 1 for the num_blends arg while (i < num_args) and isinstance(args[i], list): blendlist.append(args[i]) i += 1 stack_use += num_sources if stack_use + num_sources > maxStackLimit: # if we are here, max stack is the CFF2 max stack. # I use the CFF2 max stack limit here rather than # the 'maxstack' chosen by the client, as the default # maxstack may have been used unintentionally. For all # the other operators, this just produces a little less # optimization, but here it puts a hard (and low) limit # on the number of source fonts that can be used. break # blendList now contains as many single blend tuples as can be # combined without exceeding the CFF2 stack limit. num_blends = len(blendlist) # append the 'num_blends' default font values blend_args = [] for arg in blendlist: blend_args.append(arg[0]) for arg in blendlist: assert arg[-1] == 1 blend_args.extend(arg[1:-1]) blend_args.append(num_blends) new_args.append(blend_args) stack_use = prev_stack_use + num_blends return new_args def _addArgs(a, b): if isinstance(b, list): if isinstance(a, list): if len(a) != len(b) or a[-1] != b[-1]: raise ValueError() return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]] else: a, b = b, a if isinstance(a, list): assert a[-1] == 1 return [_addArgs(a[0], b)] + a[1:] return a + b def specializeCommands( commands, ignoreErrors=False, generalizeFirst=True, preserveTopology=False, maxstack=48, ): # We perform several rounds of optimizations. They are carefully ordered and are: # # 0. Generalize commands. # This ensures that they are in our expected simple form, with each line/curve only # having arguments for one segment, and using the generic form (rlineto/rrcurveto). 
# If caller is sure the input is in this form, they can turn off generalization to # save time. # # 1. Combine successive rmoveto operations. # # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. # We specialize into some, made-up, variants as well, which simplifies following # passes. # # 3. Merge or delete redundant operations, to the extent requested. # OpenType spec declares point numbers in CFF undefined. As such, we happily # change topology. If client relies on point numbers (in GPOS anchors, or for # hinting purposes(what?)) they can turn this off. # # 4. Peephole optimization to revert back some of the h/v variants back into their # original "relative" operator (rline/rrcurveto) if that saves a byte. # # 5. Combine adjacent operators when possible, minding not to go over max stack size. # # 6. Resolve any remaining made-up operators into real operators. # # I have convinced myself that this produces optimal bytecode (except for, possibly # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-) # A dynamic-programming approach can do the same but would be significantly slower. # # 7. For any args which are blend lists, convert them to a blend command. # 0. Generalize commands. if generalizeFirst: commands = generalizeCommands(commands, ignoreErrors=ignoreErrors) else: commands = list(commands) # Make copy since we modify in-place later. # 1. Combine successive rmoveto operations. for i in range(len(commands) - 1, 0, -1): if "rmoveto" == commands[i][0] == commands[i - 1][0]: v1, v2 = commands[i - 1][1], commands[i][1] commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]]) del commands[i] # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. # # We, in fact, specialize into more, made-up, variants that special-case when both # X and Y components are zero. This simplifies the following optimization passes. # This case is rare, but OCD does not let me skip it. # # After this round, we will have four variants that use the following mnemonics: # # - 'r' for relative, ie. non-zero X and non-zero Y, # - 'h' for horizontal, ie. zero X and non-zero Y, # - 'v' for vertical, ie. non-zero X and zero Y, # - '0' for zeros, ie. zero X and zero Y. # # The '0' pseudo-operators are not part of the spec, but help simplify the following # optimization rounds. We resolve them at the end. So, after this, we will have four # moveto and four lineto variants: # # - 0moveto, 0lineto # - hmoveto, hlineto # - vmoveto, vlineto # - rmoveto, rlineto # # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3. # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3. # # There are nine different variants of curves without the '0'. Those nine map exactly # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto, # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of # arguments) is in fact an rhcurveto. The operators in the spec are designed such that # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve. # # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest # of the curve types with a 0 need to be encoded as a h or v variant. Ie. 
a '0' can be # thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute # the '0' with either 'h' or 'v' and it works. # # When we get to curve splines however, things become more complicated... XXX finish this. # There's one more complexity with splines. If one side of the spline is not horizontal or # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode. # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'. # This limits our merge opportunities later. # for i in range(len(commands)): op, args = commands[i] if op in {"rmoveto", "rlineto"}: c, args = _categorizeVector(args) commands[i] = c + op[1:], args continue if op == "rrcurveto": c1, args1 = _categorizeVector(args[:2]) c2, args2 = _categorizeVector(args[-2:]) commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2 continue # 3. Merge or delete redundant operations, to the extent requested. # # TODO # A 0moveto that comes before all other path operations can be removed. # though I find conflicting evidence for this. # # TODO # "If hstem and vstem hints are both declared at the beginning of a # CharString, and this sequence is followed directly by the hintmask or # cntrmask operators, then the vstem hint operator (or, if applicable, # the vstemhm operator) need not be included." # # "The sequence and form of a CFF2 CharString program may be represented as: # {hs* vs* cm* hm* mt subpath}? {mt subpath}*" # # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1 # # For Type2 CharStrings the sequence is: # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar" # Some other redundancies change topology (point numbers). if not preserveTopology: for i in range(len(commands) - 1, -1, -1): op, args = commands[i] # A 00curveto is demoted to a (specialized) lineto. if op == "00curveto": assert len(args) == 4 c, args = _categorizeVector(args[1:3]) op = c + "lineto" commands[i] = op, args # and then... # A 0lineto can be deleted. if op == "0lineto": del commands[i] continue # Merge adjacent hlineto's and vlineto's. # In CFF2 charstrings from variable fonts, each # arg item may be a list of blendable values, one from # each source font. if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]): _, other_args = commands[i - 1] assert len(args) == 1 and len(other_args) == 1 try: new_args = [_addArgs(args[0], other_args[0])] except ValueError: continue commands[i - 1] = (op, new_args) del commands[i] continue # 4. Peephole optimization to revert back some of the h/v variants back into their # original "relative" operator (rline/rrcurveto) if that saves a byte. for i in range(1, len(commands) - 1): op, args = commands[i] prv, nxt = commands[i - 1][0], commands[i + 1][0] if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto": assert len(args) == 1 args = [0, args[0]] if op[0] == "v" else [args[0], 0] commands[i] = ("rlineto", args) continue if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto": assert (op[0] == "r") ^ (op[1] == "r") if op[0] == "v": pos = 0 elif op[0] != "r": pos = 1 elif op[1] == "v": pos = 4 else: pos = 5 # Insert, while maintaining the type of args (can be tuple or list). args = args[:pos] + type(args)((0,)) + args[pos:] commands[i] = ("rrcurveto", args) continue # 5. 
Combine adjacent operators when possible, minding not to go over max stack size. for i in range(len(commands) - 1, 0, -1): op1, args1 = commands[i - 1] op2, args2 = commands[i] new_op = None # Merge logic... if {op1, op2} <= {"rlineto", "rrcurveto"}: if op1 == op2: new_op = op1 else: if op2 == "rrcurveto" and len(args2) == 6: new_op = "rlinecurve" elif len(args2) == 2: new_op = "rcurveline" elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}: new_op = op2 elif {op1, op2} == {"vlineto", "hlineto"}: new_op = op1 elif "curveto" == op1[2:] == op2[2:]: d0, d1 = op1[:2] d2, d3 = op2[:2] if d1 == "r" or d2 == "r" or d0 == d3 == "r": continue d = _mergeCategories(d1, d2) if d is None: continue if d0 == "r": d = _mergeCategories(d, d3) if d is None: continue new_op = "r" + d + "curveto" elif d3 == "r": d0 = _mergeCategories(d0, _negateCategory(d)) if d0 is None: continue new_op = d0 + "r" + "curveto" else: d0 = _mergeCategories(d0, d3) if d0 is None: continue new_op = d0 + d + "curveto" # Make sure the stack depth does not exceed (maxstack - 1), so # that subroutinizer can insert subroutine calls at any point. if new_op and len(args1) + len(args2) < maxstack: commands[i - 1] = (new_op, args1 + args2) del commands[i] # 6. Resolve any remaining made-up operators into real operators. for i in range(len(commands)): op, args = commands[i] if op in {"0moveto", "0lineto"}: commands[i] = "h" + op[1:], args continue if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}: op0, op1 = op[:2] if (op0 == "r") ^ (op1 == "r"): assert len(args) % 2 == 1 if op0 == "0": op0 = "h" if op1 == "0": op1 = "h" if op0 == "r": op0 = op1 if op1 == "r": op1 = _negateCategory(op0) assert {op0, op1} <= {"h", "v"}, (op0, op1) if len(args) % 2: if op0 != op1: # vhcurveto / hvcurveto if (op0 == "h") ^ (len(args) % 8 == 1): # Swap last two args order args = args[:-2] + args[-1:] + args[-2:-1] else: # hhcurveto / vvcurveto if op0 == "h": # hhcurveto # Swap first two args order args = args[1:2] + args[:1] + args[2:] commands[i] = op0 + op1 + "curveto", args continue # 7. For any series of args which are blend lists, convert the series to a single blend arg. 
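# For instance (hypothetical values, two regions): the successive
# per-value blend lists [10, 1, 2, 1] and [20, 3, 4, 1], as produced by
# _convertBlendOpToArgs() above, are merged by _convertToBlendCmds() in
# the loop below into the single blend argument
#     [10, 20, 1, 2, 3, 4, 2]
# i.e. all defaults first, then each value's deltas, then numBlends.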
for i in range(len(commands)): op, args = commands[i] if any(isinstance(arg, list) for arg in args): commands[i] = op, _convertToBlendCmds(args) return commands def specializeProgram(program, getNumRegions=None, **kwargs): return commandsToProgram( specializeCommands(programToCommands(program, getNumRegions), **kwargs) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: import doctest sys.exit(doctest.testmod().failed) import argparse parser = argparse.ArgumentParser( "fonttools cffLib.specializer", description="CFF CharString generalizer/specializer", ) parser.add_argument("program", metavar="command", nargs="*", help="Commands.") parser.add_argument( "--num-regions", metavar="NumRegions", nargs="*", default=None, help="Number of variable-font regions for blend operations.", ) options = parser.parse_args(sys.argv[1:]) getNumRegions = ( None if options.num_regions is None else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex]) ) program = stringToProgram(options.program) print("Program:") print(programToString(program)) commands = programToCommands(program, getNumRegions) print("Commands:") print(commands) program2 = commandsToProgram(commands) print("Program from commands:") print(programToString(program2)) assert program == program2 print("Generalized program:") print(programToString(generalizeProgram(program, getNumRegions))) print("Specialized program:") print(programToString(specializeProgram(program, getNumRegions))) PKaZZZfontTools/cffLib/width.py# -*- coding: utf-8 -*- """T2CharString glyph width optimizer. CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX`` value do not need to specify their width in their charstring, saving bytes. This module determines the optimum ``defaultWidthX`` and ``nominalWidthX`` values for a font, when provided with a list of glyph widths.""" from fontTools.ttLib import TTFont from collections import defaultdict from operator import add from functools import reduce class missingdict(dict): def __init__(self, missing_func): self.missing_func = missing_func def __missing__(self, v): return self.missing_func(v) def cumSum(f, op=add, start=0, decreasing=False): keys = sorted(f.keys()) minx, maxx = keys[0], keys[-1] total = reduce(op, f.values(), start) if decreasing: missing = lambda x: start if x > maxx else total domain = range(maxx, minx - 1, -1) else: missing = lambda x: start if x < minx else total domain = range(minx, maxx + 1) out = missingdict(missing) v = start for x in domain: v = op(v, f[x]) out[x] = v return out def byteCost(widths, default, nominal): if not hasattr(widths, "items"): d = defaultdict(int) for w in widths: d[w] += 1 widths = d cost = 0 for w, freq in widths.items(): if w == default: continue diff = abs(w - nominal) if diff <= 107: cost += freq elif diff <= 1131: cost += freq * 2 else: cost += freq * 5 return cost def optimizeWidthsBruteforce(widths): """Bruteforce version. Veeeeeeeeeeeeeeeeery slow.
Only works for the smallest of fonts.""" d = defaultdict(int) for w in widths: d[w] += 1 # Maximum number of bytes using default can possibly save maxDefaultAdvantage = 5 * max(d.values()) minw, maxw = min(widths), max(widths) domain = list(range(minw, maxw + 1)) bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain) bestCost = len(widths) * 5 + 1 for nominal in domain: if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage: continue for default in domain: cost = byteCost(widths, default, nominal) if cost < bestCost: bestCost = cost bestDefault = default bestNominal = nominal return bestDefault, bestNominal def optimizeWidths(widths): """Given a list of glyph widths, or a dictionary mapping each glyph width to the number of glyphs having that width, returns a tuple of the best CFF default and nominal glyph widths. This algorithm is linear in UPEM+numGlyphs.""" if not hasattr(widths, "items"): d = defaultdict(int) for w in widths: d[w] += 1 widths = d keys = sorted(widths.keys()) minw, maxw = keys[0], keys[-1] domain = list(range(minw, maxw + 1)) # Cumulative sum/max forward/backward. cumFrqU = cumSum(widths, op=add) cumMaxU = cumSum(widths, op=max) cumFrqD = cumSum(widths, op=add, decreasing=True) cumMaxD = cumSum(widths, op=max, decreasing=True) # Cost per nominal choice, without default consideration. nomnCostU = missingdict( lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3 ) nomnCostD = missingdict( lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3 ) nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x]) # Cost-saving per nominal choice, by best default choice. dfltCostU = missingdict( lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5) ) dfltCostD = missingdict( lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5) ) dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x])) # Combined cost per nominal choice. bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x]) # Best nominal. nominal = min(domain, key=lambda x: bestCost[x]) # Work back the best default.
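# The probe points used below (nominal, nominal +/- 108, nominal +/- 1132)
# correspond to the breakpoints of byteCost() above: a width within 107 of
# nominal encodes in 1 byte, within 1131 in 2 bytes, and anything farther
# in 5 bytes, so the best default can only sit at one of these boundaries,
# walked outward while the cumulative max frequency stays flat.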
bestC = bestCost[nominal] dfltC = nomnCost[nominal] - bestCost[nominal] ends = [] if dfltC == dfltCostU[nominal]: starts = [nominal, nominal - 108, nominal - 1132] for start in starts: while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]: start -= 1 ends.append(start) else: starts = [nominal, nominal + 108, nominal + 1132] for start in starts: while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]: start += 1 ends.append(start) default = min(ends, key=lambda default: byteCost(widths, default, nominal)) return default, nominal def main(args=None): """Calculate optimum defaultWidthX/nominalWidthX values""" import argparse parser = argparse.ArgumentParser( "fonttools cffLib.width", description=main.__doc__, ) parser.add_argument( "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files" ) parser.add_argument( "-b", "--brute-force", dest="brute", action="store_true", help="Use brute-force approach (VERY slow)", ) args = parser.parse_args(args) for fontfile in args.inputs: font = TTFont(fontfile) hmtx = font["hmtx"] widths = [m[0] for m in hmtx.metrics.values()] if args.brute: default, nominal = optimizeWidthsBruteforce(widths) else: default, nominal = optimizeWidths(widths) print( "glyphs=%d default=%d nominal=%d byteCost=%d" % (len(widths), default, nominal, byteCost(widths, default, nominal)) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: import doctest sys.exit(doctest.testmod().failed) main() PKaZZZfontTools/colorLib/__init__.pyPKaZZZ�"��Y�YfontTools/colorLib/builder.py""" colorLib.builder: Build COLR/CPAL tables from scratch """ import collections import copy import enum from functools import partial from math import ceil, log from typing import ( Any, Dict, Generator, Iterable, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from fontTools.misc.arrayTools import intRect from fontTools.misc.fixedTools import fixedToFloat from fontTools.misc.treeTools import build_n_ary_tree from fontTools.ttLib.tables import C_O_L_R_ from fontTools.ttLib.tables import C_P_A_L_ from fontTools.ttLib.tables import _n_a_m_e from fontTools.ttLib.tables import otTables as ot from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode from .errors import ColorLibError from .geometry import round_start_circle_stable_containment from .table_builder import BuildCallback, TableBuilder # TODO move type aliases to colorLib.types? T = TypeVar("T") _Kwargs = Mapping[str, Any] _PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]] _PaintInputList = Sequence[_PaintInput] _ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]] _ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]] _ClipBoxInput = Union[ Tuple[int, int, int, int, int], # format 1, variable Tuple[int, int, int, int], # format 0, non-variable ot.ClipBox, ] MAX_PAINT_COLR_LAYER_COUNT = 255 _DEFAULT_ALPHA = 1.0 _MAX_REUSE_LEN = 32 def _beforeBuildPaintRadialGradient(paint, source): x0 = source["x0"] y0 = source["y0"] r0 = source["r0"] x1 = source["x1"] y1 = source["y1"] r1 = source["r1"] # TODO apparently no builder_test confirms this works (?) 
# avoid abrupt change after rounding when c0 is near c1's perimeter c = round_start_circle_stable_containment((x0, y0), r0, (x1, y1), r1) x0, y0 = c.centre r0 = c.radius # update source to ensure paint is built with corrected values source["x0"] = x0 source["y0"] = y0 source["r0"] = r0 source["x1"] = x1 source["y1"] = y1 source["r1"] = r1 return paint, source def _defaultColorStop(): colorStop = ot.ColorStop() colorStop.Alpha = _DEFAULT_ALPHA return colorStop def _defaultVarColorStop(): colorStop = ot.VarColorStop() colorStop.Alpha = _DEFAULT_ALPHA return colorStop def _defaultColorLine(): colorLine = ot.ColorLine() colorLine.Extend = ExtendMode.PAD return colorLine def _defaultVarColorLine(): colorLine = ot.VarColorLine() colorLine.Extend = ExtendMode.PAD return colorLine def _defaultPaintSolid(): paint = ot.Paint() paint.Alpha = _DEFAULT_ALPHA return paint def _buildPaintCallbacks(): return { ( BuildCallback.BEFORE_BUILD, ot.Paint, ot.PaintFormat.PaintRadialGradient, ): _beforeBuildPaintRadialGradient, ( BuildCallback.BEFORE_BUILD, ot.Paint, ot.PaintFormat.PaintVarRadialGradient, ): _beforeBuildPaintRadialGradient, (BuildCallback.CREATE_DEFAULT, ot.ColorStop): _defaultColorStop, (BuildCallback.CREATE_DEFAULT, ot.VarColorStop): _defaultVarColorStop, (BuildCallback.CREATE_DEFAULT, ot.ColorLine): _defaultColorLine, (BuildCallback.CREATE_DEFAULT, ot.VarColorLine): _defaultVarColorLine, ( BuildCallback.CREATE_DEFAULT, ot.Paint, ot.PaintFormat.PaintSolid, ): _defaultPaintSolid, ( BuildCallback.CREATE_DEFAULT, ot.Paint, ot.PaintFormat.PaintVarSolid, ): _defaultPaintSolid, } def populateCOLRv0( table: ot.COLR, colorGlyphsV0: _ColorGlyphsV0Dict, glyphMap: Optional[Mapping[str, int]] = None, ): """Build v0 color layers and add to existing COLR table. Args: table: a raw ``otTables.COLR()`` object (not ttLib's ``table_C_O_L_R_``). colorGlyphsV0: map of base glyph names to lists of (layer glyph names, color palette index) tuples. Can be empty. glyphMap: a map from glyph names to glyph indices, as returned from ``TTFont.getReverseGlyphMap()``, to optionally sort base records by GID. """ if glyphMap is not None: colorGlyphItems = sorted( colorGlyphsV0.items(), key=lambda item: glyphMap[item[0]] ) else: colorGlyphItems = colorGlyphsV0.items() baseGlyphRecords = [] layerRecords = [] for baseGlyph, layers in colorGlyphItems: baseRec = ot.BaseGlyphRecord() baseRec.BaseGlyph = baseGlyph baseRec.FirstLayerIndex = len(layerRecords) baseRec.NumLayers = len(layers) baseGlyphRecords.append(baseRec) for layerGlyph, paletteIndex in layers: layerRec = ot.LayerRecord() layerRec.LayerGlyph = layerGlyph layerRec.PaletteIndex = paletteIndex layerRecords.append(layerRec) table.BaseGlyphRecordArray = table.LayerRecordArray = None if baseGlyphRecords: table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray() table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords if layerRecords: table.LayerRecordArray = ot.LayerRecordArray() table.LayerRecordArray.LayerRecord = layerRecords table.BaseGlyphRecordCount = len(baseGlyphRecords) table.LayerRecordCount = len(layerRecords) def buildCOLR( colorGlyphs: _ColorGlyphsDict, version: Optional[int] = None, *, glyphMap: Optional[Mapping[str, int]] = None, varStore: Optional[ot.VarStore] = None, varIndexMap: Optional[ot.DeltaSetIndexMap] = None, clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None, allowLayerReuse: bool = True, ) -> C_O_L_R_.table_C_O_L_R_: """Build COLR table from color layers mapping. 
Args: colorGlyphs: map of base glyph name to either: a list of (layer glyph name, color palette index) tuples for COLRv0; or a single ``Paint`` (dict) or list of ``Paint`` for COLRv1. version: the version of the COLR table. If None, the version is determined by the presence of COLRv1 paints or variation data (varStore), which require version 1; otherwise, if all base glyphs use only simple color layers, version 0 is used. glyphMap: a map from glyph names to glyph indices, as returned from TTFont.getReverseGlyphMap(), to optionally sort base records by GID. varStore: Optional ItemVariationStore for deltas associated with v1 layers. varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layers. clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples: (xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase). Returns: A new COLR table. """ self = C_O_L_R_.table_C_O_L_R_() if varStore is not None and version == 0: raise ValueError("Can't add VarStore to COLRv0") if version in (None, 0) and not varStore: # split color glyphs into v0 and v1 and encode separately colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs) if version == 0 and colorGlyphsV1: raise ValueError("Can't encode COLRv1 glyphs in COLRv0") else: # unless v1 is explicitly requested or we have variations, in which case # we encode all color glyphs as v1 colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs colr = ot.COLR() populateCOLRv0(colr, colorGlyphsV0, glyphMap) colr.LayerList, colr.BaseGlyphList = buildColrV1( colorGlyphsV1, glyphMap, allowLayerReuse=allowLayerReuse, ) if version is None: version = 1 if (varStore or colorGlyphsV1) else 0 elif version not in (0, 1): raise NotImplementedError(version) self.version = colr.Version = version if version == 0: self.ColorLayers = self._decompileColorLayersV0(colr) else: colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None colr.VarIndexMap = varIndexMap colr.VarStore = varStore self.table = colr return self def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList: clipList = ot.ClipList() clipList.Format = 1 clipList.clips = {name: buildClipBox(box) for name, box in clipBoxes.items()} return clipList def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox: if isinstance(clipBox, ot.ClipBox): return clipBox n = len(clipBox) clip = ot.ClipBox() if n not in (4, 5): raise ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {n}") clip.xMin, clip.yMin, clip.xMax, clip.yMax = intRect(clipBox[:4]) clip.Format = int(n == 5) + 1 if n == 5: clip.VarIndexBase = int(clipBox[4]) return clip class ColorPaletteType(enum.IntFlag): USABLE_WITH_LIGHT_BACKGROUND = 0x0001 USABLE_WITH_DARK_BACKGROUND = 0x0002 @classmethod def _missing_(cls, value): # enforce reserved bits if isinstance(value, int) and (value < 0 or value & 0xFFFC != 0): raise ValueError(f"{value} is not a valid {cls.__name__}") return super()._missing_(value) # None, 'abc' or {'en': 'abc', 'de': 'xyz'} _OptionalLocalizedString = Union[None, str, Dict[str, str]] def buildPaletteLabels( labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e ) -> List[Optional[int]]: return [ ( nameTable.addMultilingualName(l, mac=False) if isinstance(l, dict) else ( C_P_A_L_.table_C_P_A_L_.NO_NAME_ID if l is None else nameTable.addMultilingualName({"en": l}, mac=False) ) ) for l in labels ] def buildCPAL( palettes: Sequence[Sequence[Tuple[float, float, float, float]]], paletteTypes: Optional[Sequence[ColorPaletteType]] = None, paletteLabels:
Optional[Sequence[_OptionalLocalizedString]] = None, paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None, nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None, ) -> C_P_A_L_.table_C_P_A_L_: """Build CPAL table from list of color palettes. Args: palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats in the range [0..1]. paletteTypes: optional list of ColorPaletteType, one for each palette. paletteLabels: optional list of palette labels. Each label can be either: None (no label), a string (for default English labels), or a localized string (as a dict keyed with BCP47 language codes). paletteEntryLabels: optional list of palette entry labels, one for each palette entry (see paletteLabels). nameTable: optional name table in which to store palette and palette entry labels. Required if either paletteLabels or paletteEntryLabels is set. Returns: A new CPAL table: v1 if custom palette types or labels are specified, else v0. """ if len({len(p) for p in palettes}) != 1: raise ColorLibError("color palettes have different lengths") if (paletteLabels or paletteEntryLabels) and not nameTable: raise TypeError( "nameTable is required if palette or palette entries have labels" ) cpal = C_P_A_L_.table_C_P_A_L_() cpal.numPaletteEntries = len(palettes[0]) cpal.palettes = [] for i, palette in enumerate(palettes): colors = [] for j, color in enumerate(palette): if not isinstance(color, tuple) or len(color) != 4: raise ColorLibError( f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}" ) if any(v > 1 or v < 0 for v in color): raise ColorLibError( f"palette[{i}][{j}] has out-of-range [0..1] color: {color!r}" ) # input colors are RGBA, CPAL encodes them as BGRA red, green, blue, alpha = color colors.append( C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha))) ) cpal.palettes.append(colors) if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)): cpal.version = 1 if paletteTypes is not None: if len(paletteTypes) != len(palettes): raise ColorLibError( f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}" ) cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes] else: cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len( palettes ) if paletteLabels is not None: if len(paletteLabels) != len(palettes): raise ColorLibError( f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}" ) cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable) else: cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes) if paletteEntryLabels is not None: if len(paletteEntryLabels) != cpal.numPaletteEntries: raise ColorLibError( f"Expected {cpal.numPaletteEntries} paletteEntryLabels, " f"got {len(paletteEntryLabels)}" ) cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable) else: cpal.paletteEntryLabels = [ C_P_A_L_.table_C_P_A_L_.NO_NAME_ID ] * cpal.numPaletteEntries else: cpal.version = 0 return cpal # COLR v1 tables # See draft proposal at: https://github.com/googlefonts/colr-gradients-spec def _is_colrv0_layer(layer: Any) -> bool: # Consider as COLRv0 layer any sequence of length 2 (be it tuple or list) in which # the first element is a str (the layerGlyph) and the second element is an int # (CPAL paletteIndex).
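# (Editorial examples, not in the original comment.) For instance,
# ("glyph00011", 2) and ["glyph00011", 2] both qualify as COLRv0 layers,
# whereas a dict such as {"Format": ot.PaintFormat.PaintGlyph, ...} or an
# already-built ot.Paint falls through to the COLRv1 path.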
# https://github.com/googlefonts/ufo2ft/issues/426 try: layerGlyph, paletteIndex = layer except (TypeError, ValueError): return False else: return isinstance(layerGlyph, str) and isinstance(paletteIndex, int) def _split_color_glyphs_by_version( colorGlyphs: _ColorGlyphsDict, ) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]: colorGlyphsV0 = {} colorGlyphsV1 = {} for baseGlyph, layers in colorGlyphs.items(): if all(_is_colrv0_layer(l) for l in layers): colorGlyphsV0[baseGlyph] = layers else: colorGlyphsV1[baseGlyph] = layers # sanity check assert set(colorGlyphs) == (set(colorGlyphsV0) | set(colorGlyphsV1)) return colorGlyphsV0, colorGlyphsV1 def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]: # TODO feels like something itertools might have already for lbound in range(num_layers): # Reuse of very large #s of layers is relatively unlikely # +2: we want sequences of at least 2 # otData handles single-record duplication for ubound in range( lbound + 2, min(num_layers + 1, lbound + 2 + _MAX_REUSE_LEN) ): yield (lbound, ubound) class LayerReuseCache: reusePool: Mapping[Tuple[Any, ...], int] tuples: Mapping[int, Tuple[Any, ...]] keepAlive: List[ot.Paint] # we need id to remain valid def __init__(self): self.reusePool = {} self.tuples = {} self.keepAlive = [] def _paint_tuple(self, paint: ot.Paint): # start simple, who even cares about cyclic graphs or interesting field types def _tuple_safe(value): if isinstance(value, enum.Enum): return value elif hasattr(value, "__dict__"): return tuple( (k, _tuple_safe(v)) for k, v in sorted(value.__dict__.items()) ) elif isinstance(value, collections.abc.MutableSequence): return tuple(_tuple_safe(e) for e in value) return value # Cache the tuples for individual Paint instead of the whole sequence # because the seq could be a transient slice result = self.tuples.get(id(paint), None) if result is None: result = _tuple_safe(paint) self.tuples[id(paint)] = result self.keepAlive.append(paint) return result def _as_tuple(self, paints: Sequence[ot.Paint]) -> Tuple[Any, ...]: return tuple(self._paint_tuple(p) for p in paints) def try_reuse(self, layers: List[ot.Paint]) -> List[ot.Paint]: found_reuse = True while found_reuse: found_reuse = False ranges = sorted( _reuse_ranges(len(layers)), key=lambda t: (t[1] - t[0], t[1], t[0]), reverse=True, ) for lbound, ubound in ranges: reuse_lbound = self.reusePool.get( self._as_tuple(layers[lbound:ubound]), -1 ) if reuse_lbound == -1: continue new_slice = ot.Paint() new_slice.Format = int(ot.PaintFormat.PaintColrLayers) new_slice.NumLayers = ubound - lbound new_slice.FirstLayerIndex = reuse_lbound layers = layers[:lbound] + [new_slice] + layers[ubound:] found_reuse = True break return layers def add(self, layers: List[ot.Paint], first_layer_index: int): for lbound, ubound in _reuse_ranges(len(layers)): self.reusePool[self._as_tuple(layers[lbound:ubound])] = ( lbound + first_layer_index ) class LayerListBuilder: layers: List[ot.Paint] cache: LayerReuseCache allowLayerReuse: bool def __init__(self, *, allowLayerReuse=True): self.layers = [] if allowLayerReuse: self.cache = LayerReuseCache() else: self.cache = None # We need to intercept construction of PaintColrLayers callbacks = _buildPaintCallbacks() callbacks[ ( BuildCallback.BEFORE_BUILD, ot.Paint, ot.PaintFormat.PaintColrLayers, ) ] = self._beforeBuildPaintColrLayers self.tableBuilder = TableBuilder(callbacks) # COLR layers is unusual in that it modifies shared state # so we need a callback into an object def _beforeBuildPaintColrLayers(self, dest, 
source): # Sketchy gymnastics: a sequence input will have dropped its layers # into NumLayers; get it back if isinstance(source.get("NumLayers", None), collections.abc.Sequence): layers = source["NumLayers"] else: layers = source["Layers"] # Convert maps, seqs, or whatever into typed objects layers = [self.buildPaint(l) for l in layers] # No reason to have a PaintColrLayers with just one entry if len(layers) == 1: return layers[0], {} if self.cache is not None: # Look for reuse, with preference to longer sequences # This may make the layer list smaller layers = self.cache.try_reuse(layers) # The layer list is now final; if it's too big we need to tree it is_tree = len(layers) > MAX_PAINT_COLR_LAYER_COUNT layers = build_n_ary_tree(layers, n=MAX_PAINT_COLR_LAYER_COUNT) # We now have a tree of sequences with Paint leaves. # Convert the sequences into PaintColrLayers. def listToColrLayers(layer): if isinstance(layer, collections.abc.Sequence): return self.buildPaint( { "Format": ot.PaintFormat.PaintColrLayers, "Layers": [listToColrLayers(l) for l in layer], } ) return layer layers = [listToColrLayers(l) for l in layers] # No reason to have a PaintColrLayers with just one entry if len(layers) == 1: return layers[0], {} paint = ot.Paint() paint.Format = int(ot.PaintFormat.PaintColrLayers) paint.NumLayers = len(layers) paint.FirstLayerIndex = len(self.layers) self.layers.extend(layers) # Register our parts for reuse provided we aren't a tree # If we are a tree, the leaves were registered for reuse and that will suffice if self.cache is not None and not is_tree: self.cache.add(layers, paint.FirstLayerIndex) # we've fully built dest; empty source prevents generalized build from kicking in return paint, {} def buildPaint(self, paint: _PaintInput) -> ot.Paint: return self.tableBuilder.build(ot.Paint, paint) def build(self) -> Optional[ot.LayerList]: if not self.layers: return None layers = ot.LayerList() layers.LayerCount = len(self.layers) layers.Paint = self.layers return layers def buildBaseGlyphPaintRecord( baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput ) -> ot.BaseGlyphList: self = ot.BaseGlyphPaintRecord() self.BaseGlyph = baseGlyph self.Paint = layerBuilder.buildPaint(paint) return self def _format_glyph_errors(errors: Mapping[str, Exception]) -> str: lines = [] for baseGlyph, error in sorted(errors.items()): lines.append(f"  {baseGlyph} => {type(error).__name__}: {error}") return "\n".join(lines) def buildColrV1( colorGlyphs: _ColorGlyphsDict, glyphMap: Optional[Mapping[str, int]] = None, *, allowLayerReuse: bool = True, ) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]: if glyphMap is not None: colorGlyphItems = sorted( colorGlyphs.items(), key=lambda item: glyphMap[item[0]] ) else: colorGlyphItems = colorGlyphs.items() errors = {} baseGlyphs = [] layerBuilder = LayerListBuilder(allowLayerReuse=allowLayerReuse) for baseGlyph, paint in colorGlyphItems: try: baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint)) except (ColorLibError, OverflowError, ValueError, TypeError) as e: errors[baseGlyph] = e if errors: failed_glyphs = _format_glyph_errors(errors) exc = ColorLibError(f"Failed to build BaseGlyphList:\n{failed_glyphs}") exc.errors = errors raise exc from next(iter(errors.values())) layers = layerBuilder.build() glyphs = ot.BaseGlyphList() glyphs.BaseGlyphCount = len(baseGlyphs) glyphs.BaseGlyphPaintRecord = baseGlyphs return (layers, glyphs) PKaZZZ]��))fontTools/colorLib/errors.pyclass ColorLibError(Exception): pass
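# (Editorial sketch, not part of the original archive: a minimal end-to-end
# use of the builder module above. Glyph names and colors are hypothetical.)
from fontTools.colorLib.builder import buildCOLR, buildCPAL

# COLRv0-style input: each layer is a (layer glyph name, palette index) pair,
# so buildCOLR infers version 0 when no v1 paints or variations are involved.
colr = buildCOLR({"A": [("A.layer0", 0), ("A.layer1", 1)]})
assert colr.version == 0

# One palette of two (R, G, B, A) colors in the 0..1 range; with no custom
# palette types or labels, a version 0 CPAL is produced.
cpal = buildCPAL([[(1.0, 0.0, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0)]])
assert cpal.version == 0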
PKaZZZ+t���fontTools/colorLib/geometry.py"""Helpers for manipulating 2D points and vectors in COLR table.""" from math import copysign, cos, hypot, isclose, pi from fontTools.misc.roundTools import otRound def _vector_between(origin, target): return (target[0] - origin[0], target[1] - origin[1]) def _round_point(pt): return (otRound(pt[0]), otRound(pt[1])) def _unit_vector(vec): length = hypot(*vec) if length == 0: return None return (vec[0] / length, vec[1] / length) _CIRCLE_INSIDE_TOLERANCE = 1e-4 # The unit vector's X and Y components are respectively # U = (cos(α), sin(α)) # where α is the angle between the unit vector and the positive x axis. _UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi) # == sin(1/8 * pi) == 0.38268343236508984 def _rounding_offset(direction): # Return 2-tuple of -/+ 1.0 or 0.0 approximately based on the direction vector. # We divide the unit circle in 8 equal slices oriented towards the cardinal # (N, E, S, W) and intermediate (NE, SE, SW, NW) directions. To each slice we # map one of the possible cases: -1, 0, +1 for each of the X and Y coordinates. # E.g. Return (+1.0, -1.0) if unit vector is oriented towards SE, or # (-1.0, 0.0) if it's pointing West, etc. uv = _unit_vector(direction) if not uv: return (0, 0) result = [] for uv_component in uv: if -_UNIT_VECTOR_THRESHOLD <= uv_component < _UNIT_VECTOR_THRESHOLD: # unit vector component near 0: direction almost orthogonal to the # direction of the current axis, thus keep coordinate unchanged result.append(0) else: # nudge coord by +/- 1.0 in direction of unit vector result.append(copysign(1.0, uv_component)) return tuple(result) class Circle: def __init__(self, centre, radius): self.centre = centre self.radius = radius def __repr__(self): return f"Circle(centre={self.centre}, radius={self.radius})" def round(self): return Circle(_round_point(self.centre), otRound(self.radius)) def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE): dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre)) return ( isclose(outer_circle.radius, dist, rel_tol=tolerance) or outer_circle.radius > dist ) def concentric(self, other): return self.centre == other.centre def move(self, dx, dy): self.centre = (self.centre[0] + dx, self.centre[1] + dy) def round_start_circle_stable_containment(c0, r0, c1, r1): """Round start circle so that it stays inside/outside end circle after rounding. The rounding of circle coordinates to integers may cause an abrupt change if the start circle c0 is so close to the end circle c1's perimeter that it ends up falling outside (or inside) as a result of the rounding. To keep the gradient unchanged, we nudge it in the right direction. See: https://github.com/googlefonts/colr-gradients-spec/issues/204 https://github.com/googlefonts/picosvg/issues/158 """ start, end = Circle(c0, r0), Circle(c1, r1) inside_before_round = start.inside(end) round_start = start.round() round_end = end.round() inside_after_round = round_start.inside(round_end) if inside_before_round == inside_after_round: return round_start elif inside_after_round: # start was outside before rounding: we need to push start away from end direction = _vector_between(round_end.centre, round_start.centre) radius_delta = +1.0 else: # start was inside before rounding: we need to push start towards end direction = _vector_between(round_start.centre, round_end.centre) radius_delta = -1.0 dx, dy = _rounding_offset(direction) # At most 2 iterations ought to be enough to converge.
Before the loop, we # know the start circle didn't keep containment after normal rounding; thus # we continue adjusting by -/+ 1.0 until containment is restored. # Normal rounding can at most move each coordinate -/+0.5; in the worst case # both the start and end circle's centres and radii will be rounded in opposite # directions, e.g. when they move along a 45 degree diagonal: # c0 = (1.5, 1.5) ===> (2.0, 2.0) # r0 = 0.5 ===> 1.0 # c1 = (0.499, 0.499) ===> (0.0, 0.0) # r1 = 2.499 ===> 2.0 # In this example, the relative distance between the circles, calculated # as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and # -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both # x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these # moves cover twice that distance, which is enough to restore containment. max_attempts = 2 for _ in range(max_attempts): if round_start.concentric(round_end): # can't move c0 towards c1 (they are the same), so we change the radius round_start.radius += radius_delta assert round_start.radius >= 0 else: round_start.move(dx, dy) if inside_before_round == round_start.inside(round_end): break else: # likely a bug raise AssertionError( f"Rounding circle {start} " f"{'inside' if inside_before_round else 'outside'} " f"{end} failed after {max_attempts} attempts!" ) return round_start PKaZZZ}`/�--#fontTools/colorLib/table_builder.py""" colorLib.table_builder: Generic helper for filling in BaseTable derivatives from tuples and maps and such. """ import collections import enum from fontTools.ttLib.tables.otBase import ( BaseTable, FormatSwitchingBaseTable, UInt8FormatSwitchingBaseTable, ) from fontTools.ttLib.tables.otConverters import ( ComputedInt, SimpleValue, Struct, Short, UInt8, UShort, IntValue, FloatValue, OptionalValue, ) from fontTools.misc.roundTools import otRound class BuildCallback(enum.Enum): """Keyed on (BEFORE_BUILD, class[, Format if available]). Receives (dest, source). Should return (dest, source), which can be new objects. """ BEFORE_BUILD = enum.auto() """Keyed on (AFTER_BUILD, class[, Format if available]). Receives (dest). Should return dest, which can be a new object. """ AFTER_BUILD = enum.auto() """Keyed on (CREATE_DEFAULT, class[, Format if available]). Receives no arguments. Should return a new instance of class. """ CREATE_DEFAULT = enum.auto() def _assignable(convertersByName): return {k: v for k, v in convertersByName.items() if not isinstance(v, ComputedInt)} def _isNonStrSequence(value): return isinstance(value, collections.abc.Sequence) and not isinstance(value, str) def _split_format(cls, source): if _isNonStrSequence(source): assert len(source) > 0, f"{cls} needs at least Format from {source}" fmt, remainder = source[0], source[1:] elif isinstance(source, collections.abc.Mapping): assert "Format" in source, f"{cls} needs at least Format from {source}" remainder = source.copy() fmt = remainder.pop("Format") else: raise ValueError(f"Not sure how to populate {cls} from {source}") assert isinstance( fmt, collections.abc.Hashable ), f"{cls} Format is not hashable: {fmt!r}" assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}" return fmt, remainder class TableBuilder: """ Helps to populate things derived from BaseTable from maps, tuples, etc. A table of lifecycle callbacks may be provided to add logic beyond what is possible based on otData info for the target class. See BuildCallback.
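    Example (editorial sketch, not in the original docstring; assumes the
    otTables Paint type, whose Format 2 is PaintSolid):

        >>> from fontTools.ttLib.tables import otTables as ot
        >>> paint = TableBuilder().build(
        ...     ot.Paint, {"Format": 2, "PaletteIndex": 0, "Alpha": 0.5}
        ... )
        >>> int(paint.Format), paint.PaletteIndex
        (2, 0)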
""" def __init__(self, callbackTable=None): if callbackTable is None: callbackTable = {} self._callbackTable = callbackTable def _convert(self, dest, field, converter, value): enumClass = getattr(converter, "enumClass", None) if enumClass: if isinstance(value, enumClass): pass elif isinstance(value, str): try: value = getattr(enumClass, value.upper()) except AttributeError: raise ValueError(f"{value} is not a valid {enumClass}") else: value = enumClass(value) elif isinstance(converter, IntValue): value = otRound(value) elif isinstance(converter, FloatValue): value = float(value) elif isinstance(converter, Struct): if converter.repeat: if _isNonStrSequence(value): value = [self.build(converter.tableClass, v) for v in value] else: value = [self.build(converter.tableClass, value)] setattr(dest, converter.repeat, len(value)) else: value = self.build(converter.tableClass, value) elif callable(converter): value = converter(value) setattr(dest, field, value) def build(self, cls, source): assert issubclass(cls, BaseTable) if isinstance(source, cls): return source callbackKey = (cls,) fmt = None if issubclass(cls, FormatSwitchingBaseTable): fmt, source = _split_format(cls, source) callbackKey = (cls, fmt) dest = self._callbackTable.get( (BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls() )() assert isinstance(dest, cls) convByName = _assignable(cls.convertersByName) skippedFields = set() # For format switchers we need to resolve converters based on format if issubclass(cls, FormatSwitchingBaseTable): dest.Format = fmt convByName = _assignable(convByName[dest.Format]) skippedFields.add("Format") # Convert sequence => mapping so before thunk only has to handle one format if _isNonStrSequence(source): # Sequence (typically list or tuple) assumed to match fields in declaration order assert len(source) <= len( convByName ), f"Sequence of {len(source)} too long for {cls}; expected <= {len(convByName)} values" source = dict(zip(convByName.keys(), source)) dest, source = self._callbackTable.get( (BuildCallback.BEFORE_BUILD,) + callbackKey, lambda d, s: (d, s) )(dest, source) if isinstance(source, collections.abc.Mapping): for field, value in source.items(): if field in skippedFields: continue converter = convByName.get(field, None) if not converter: raise ValueError( f"Unrecognized field {field} for {cls}; expected one of {sorted(convByName.keys())}" ) self._convert(dest, field, converter, value) else: # let's try as a 1-tuple dest = self.build(cls, (source,)) for field, conv in convByName.items(): if not hasattr(dest, field) and isinstance(conv, OptionalValue): setattr(dest, field, conv.DEFAULT) dest = self._callbackTable.get( (BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d )(dest) return dest class TableUnbuilder: def __init__(self, callbackTable=None): if callbackTable is None: callbackTable = {} self._callbackTable = callbackTable def unbuild(self, table): assert isinstance(table, BaseTable) source = {} callbackKey = (type(table),) if isinstance(table, FormatSwitchingBaseTable): source["Format"] = int(table.Format) callbackKey += (table.Format,) for converter in table.getConverters(): if isinstance(converter, ComputedInt): continue value = getattr(table, converter.name) enumClass = getattr(converter, "enumClass", None) if enumClass: source[converter.name] = value.name.lower() elif isinstance(converter, Struct): if converter.repeat: source[converter.name] = [self.unbuild(v) for v in value] else: source[converter.name] = self.unbuild(value) elif isinstance(converter, SimpleValue): # 
"simple" values (e.g. int, float, str) need no further un-building source[converter.name] = value else: raise NotImplementedError( "Don't know how unbuild {value!r} with {converter!r}" ) source = self._callbackTable.get(callbackKey, lambda s: s)(source) return source PKaZZZV��^^fontTools/colorLib/unbuilder.pyfrom fontTools.ttLib.tables import otTables as ot from .table_builder import TableUnbuilder def unbuildColrV1(layerList, baseGlyphList): layers = [] if layerList: layers = layerList.Paint unbuilder = LayerListUnbuilder(layers) return { rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint) for rec in baseGlyphList.BaseGlyphPaintRecord } def _flatten_layers(lst): for paint in lst: if paint["Format"] == ot.PaintFormat.PaintColrLayers: yield from _flatten_layers(paint["Layers"]) else: yield paint class LayerListUnbuilder: def __init__(self, layers): self.layers = layers callbacks = { ( ot.Paint, ot.PaintFormat.PaintColrLayers, ): self._unbuildPaintColrLayers, } self.tableUnbuilder = TableUnbuilder(callbacks) def unbuildPaint(self, paint): assert isinstance(paint, ot.Paint) return self.tableUnbuilder.unbuild(paint) def _unbuildPaintColrLayers(self, source): assert source["Format"] == ot.PaintFormat.PaintColrLayers layers = list( _flatten_layers( [ self.unbuildPaint(childPaint) for childPaint in self.layers[ source["FirstLayerIndex"] : source["FirstLayerIndex"] + source["NumLayers"] ] ] ) ) if len(layers) == 1: return layers[0] return {"Format": source["Format"], "Layers": layers} if __name__ == "__main__": from pprint import pprint import sys from fontTools.ttLib import TTFont try: fontfile = sys.argv[1] except IndexError: sys.exit("usage: fonttools colorLib.unbuilder FONTFILE") font = TTFont(fontfile) colr = font["COLR"] if colr.version < 1: sys.exit(f"error: No COLR table version=1 found in {fontfile}") colorGlyphs = unbuildColrV1( colr.table.LayerList, colr.table.BaseGlyphList, ) pprint(colorGlyphs) PKaZZZS�6�S S fontTools/config/__init__.py""" Define all configuration options that can affect the working of fontTools modules. E.g. optimization levels of varLib IUP, otlLib GPOS compression level, etc. If this file gets too big, split it into smaller files per-module. An instance of the Config class can be attached to a TTFont object, so that the various modules can access their configuration options from it. """ from textwrap import dedent from fontTools.misc.configTools import * class Config(AbstractConfig): options = Options() OPTIONS = Config.options Config.register_option( name="fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL", help=dedent( """\ GPOS Lookup type 2 (PairPos) compression level: 0 = do not attempt to compact PairPos lookups; 1 to 8 = create at most 1 to 8 new subtables for each existing subtable, provided that it would yield a 50%% file size saving; 9 = create as many new subtables as needed to yield a file size saving. Default: 0. This compaction aims to save file size, by splitting large class kerning subtables (Format 2) that contain many zero values into smaller and denser subtables. It's a trade-off between the overhead of several subtables versus the sparseness of one big subtable. 
See the pull request: https://github.com/fonttools/fonttools/pull/2326 """ ), default=0, parse=int, validate=lambda v: v in range(10), ) Config.register_option( name="fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER", help=dedent( """\ FontTools tries to use the HarfBuzz Repacker to serialize GPOS/GSUB tables if the uharfbuzz python bindings are importable, otherwise falls back to its slower, less efficient serializer. Set to False to always use the latter. Set to True to explicitly request the HarfBuzz Repacker (will raise an error if uharfbuzz cannot be imported). """ ), default=None, parse=Option.parse_optional_bool, validate=Option.validate_optional_bool, ) Config.register_option( name="fontTools.otlLib.builder:WRITE_GPOS7", help=dedent( """\ macOS before 13.2 didn’t support GPOS LookupType 7 (non-chaining ContextPos lookups), so FontTools.otlLib.builder disables a file size optimisation that would use LookupType 7 instead of 8 when there is no chaining (no prefix or suffix). Set to True to enable the optimization. """ ), default=False, parse=Option.parse_optional_bool, validate=Option.validate_optional_bool, ) PKaZZZ���7jjfontTools/cu2qu/__init__.py# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .cu2qu import * PKaZZZ�|'KSSfontTools/cu2qu/__main__.pyimport sys from .cli import main if __name__ == "__main__": sys.exit(main()) PKaZZZ��d�EEfontTools/cu2qu/benchmark.py"""Benchmark the cu2qu algorithm performance.""" from .cu2qu import * import random import timeit MAX_ERR = 0.05 def generate_curve(): return [ tuple(float(random.randint(0, 2048)) for coord in range(2)) for point in range(4) ] def setup_curve_to_quadratic(): return generate_curve(), MAX_ERR def setup_curves_to_quadratic(): num_curves = 3 return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves) def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000): setup_func = "setup_" + function if setup_suffix: print("%s with %s:" % (function, setup_suffix), end="") setup_func += "_" + setup_suffix else: print("%s:" % function, end="") def wrapper(function, setup_func): function = globals()[function] setup_func = globals()[setup_func] def wrapped(): return function(*setup_func()) return wrapped results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number) print("\t%5.1fus" % (min(results) * 1000000.0 / number)) def main(): """Benchmark the cu2qu algorithm performance.""" run_benchmark("cu2qu", "curve_to_quadratic") run_benchmark("cu2qu", "curves_to_quadratic") if __name__ == "__main__": random.seed(1) main() PKaZZZ!�7Ի�fontTools/cu2qu/cli.pyimport os import argparse import logging import shutil import multiprocessing as mp from contextlib import closing from functools import partial import fontTools from .ufo import font_to_quadratic, fonts_to_quadratic ufo_module = None try: import ufoLib2 as ufo_module except ImportError: try: import defcon as ufo_module except ImportError as e: pass logger = 
logging.getLogger("fontTools.cu2qu") def _cpu_count(): try: return mp.cpu_count() except NotImplementedError: # pragma: no cover return 1 def open_ufo(path): if hasattr(ufo_module.Font, "open"): # ufoLib2 return ufo_module.Font.open(path) return ufo_module.Font(path) # defcon def _font_to_quadratic(input_path, output_path=None, **kwargs): ufo = open_ufo(input_path) logger.info("Converting curves for %s", input_path) if font_to_quadratic(ufo, **kwargs): logger.info("Saving %s", output_path) if output_path: ufo.save(output_path) else: ufo.save() # save in-place elif output_path: _copytree(input_path, output_path) def _samepath(path1, path2): # TODO on python3+, there's os.path.samefile path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1))) path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2))) return path1 == path2 def _copytree(input_path, output_path): if _samepath(input_path, output_path): logger.debug("input and output paths are the same file; skipped copy") return if os.path.exists(output_path): shutil.rmtree(output_path) shutil.copytree(input_path, output_path) def main(args=None): """Convert a UFO font from cubic to quadratic curves""" parser = argparse.ArgumentParser(prog="cu2qu") parser.add_argument("--version", action="version", version=fontTools.__version__) parser.add_argument( "infiles", nargs="+", metavar="INPUT", help="one or more input UFO source file(s).", ) parser.add_argument("-v", "--verbose", action="count", default=0) parser.add_argument( "-e", "--conversion-error", type=float, metavar="ERROR", default=None, help="maximum approximation error measured in EM (default: 0.001)", ) parser.add_argument( "-m", "--mixed", default=False, action="store_true", help="whether to use mixed quadratic and cubic curves", ) parser.add_argument( "--keep-direction", dest="reverse_direction", action="store_false", help="do not reverse the contour direction", ) mode_parser = parser.add_mutually_exclusive_group() mode_parser.add_argument( "-i", "--interpolatable", action="store_true", help="whether curve conversion should keep interpolation compatibility", ) mode_parser.add_argument( "-j", "--jobs", type=int, nargs="?", default=1, const=_cpu_count(), metavar="N", help="Convert using N processes in parallel (default: %(default)s)", ) output_parser = parser.add_mutually_exclusive_group() output_parser.add_argument( "-o", "--output-file", default=None, metavar="OUTPUT", help=( "output filename for the converted UFO. By default fonts are " "modified in place. This only works with a single input."
), ) output_parser.add_argument( "-d", "--output-dir", default=None, metavar="DIRECTORY", help="output directory in which to save the converted UFOs", ) options = parser.parse_args(args) if ufo_module is None: parser.error("Either ufoLib2 or defcon is required to run this script.") if not options.verbose: level = "WARNING" elif options.verbose == 1: level = "INFO" else: level = "DEBUG" logging.basicConfig(level=level) if len(options.infiles) > 1 and options.output_file: parser.error("-o/--output-file can't be used with multiple inputs") if options.output_dir: output_dir = options.output_dir if not os.path.exists(output_dir): os.mkdir(output_dir) elif not os.path.isdir(output_dir): parser.error("'%s' is not a directory" % output_dir) output_paths = [ os.path.join(output_dir, os.path.basename(p)) for p in options.infiles ] elif options.output_file: output_paths = [options.output_file] else: # save in-place output_paths = [None] * len(options.infiles) kwargs = dict( dump_stats=options.verbose > 0, max_err_em=options.conversion_error, reverse_direction=options.reverse_direction, all_quadratic=not options.mixed, ) if options.interpolatable: logger.info("Converting curves compatibly") ufos = [open_ufo(infile) for infile in options.infiles] if fonts_to_quadratic(ufos, **kwargs): for ufo, output_path in zip(ufos, output_paths): logger.info("Saving %s", output_path) if output_path: ufo.save(output_path) else: ufo.save() else: for input_path, output_path in zip(options.infiles, output_paths): if output_path: _copytree(input_path, output_path) else: jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1 if jobs > 1: func = partial(_font_to_quadratic, **kwargs) logger.info("Running %d parallel processes", jobs) with closing(mp.Pool(jobs)) as pool: pool.starmap(func, zip(options.infiles, output_paths)) else: for input_path, output_path in zip(options.infiles, output_paths): _font_to_quadratic(input_path, output_path, **kwargs) PKaZZZl��9R@R@fontTools/cu2qu/cu2qu.py# cython: language_level=3 # distutils: define_macros=CYTHON_TRACE_NOGIL=1 # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: import cython COMPILED = cython.compiled except (AttributeError, ImportError): # if cython not installed, use mock module with no-op decorators and types from fontTools.misc import cython COMPILED = False import math from .errors import Error as Cu2QuError, ApproxNotFoundError __all__ = ["curve_to_quadratic", "curves_to_quadratic"] MAX_N = 100 NAN = float("NaN") @cython.cfunc @cython.inline @cython.returns(cython.double) @cython.locals(v1=cython.complex, v2=cython.complex) def dot(v1, v2): """Return the dot product of two vectors. Args: v1 (complex): First vector. v2 (complex): Second vector. Returns: double: Dot product.
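    Example (editorial addition): treating complexes as 2D vectors,
    (1, 2)·(3, 4) = 1*3 + 2*4 = 11.

        >>> dot(1 + 2j, 3 + 4j)
        11.0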
""" return (v1 * v2.conjugate()).real @cython.cfunc @cython.inline @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) @cython.locals( _1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex ) def calc_cubic_points(a, b, c, d): _1 = d _2 = (c / 3.0) + d _3 = (b + c) / 3.0 + _2 _4 = a + d + c + b return _1, _2, _3, _4 @cython.cfunc @cython.inline @cython.locals( p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex ) @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) def calc_cubic_parameters(p0, p1, p2, p3): c = (p1 - p0) * 3.0 b = (p2 - p1) * 3.0 - c d = p0 a = p3 - d - c - b return a, b, c, d @cython.cfunc @cython.inline @cython.locals( p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex ) def split_cubic_into_n_iter(p0, p1, p2, p3, n): """Split a cubic Bezier into n equal parts. Splits the curve into `n` equal parts by curve time. (t=0..1/n, t=1/n..2/n, ...) Args: p0 (complex): Start point of curve. p1 (complex): First handle of curve. p2 (complex): Second handle of curve. p3 (complex): End point of curve. Returns: An iterator yielding the control points (four complex values) of the subcurves. """ # Hand-coded special-cases if n == 2: return iter(split_cubic_into_two(p0, p1, p2, p3)) if n == 3: return iter(split_cubic_into_three(p0, p1, p2, p3)) if n == 4: a, b = split_cubic_into_two(p0, p1, p2, p3) return iter( split_cubic_into_two(a[0], a[1], a[2], a[3]) + split_cubic_into_two(b[0], b[1], b[2], b[3]) ) if n == 6: a, b = split_cubic_into_two(p0, p1, p2, p3) return iter( split_cubic_into_three(a[0], a[1], a[2], a[3]) + split_cubic_into_three(b[0], b[1], b[2], b[3]) ) return _split_cubic_into_n_gen(p0, p1, p2, p3, n) @cython.locals( p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, n=cython.int, ) @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) @cython.locals( dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int ) @cython.locals( a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex ) def _split_cubic_into_n_gen(p0, p1, p2, p3, n): a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3) dt = 1 / n delta_2 = dt * dt delta_3 = dt * delta_2 for i in range(n): t1 = i * dt t1_2 = t1 * t1 # calc new a, b, c and d a1 = a * delta_3 b1 = (3 * a * t1 + b) * delta_2 c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d yield calc_cubic_points(a1, b1, c1, d1) @cython.cfunc @cython.inline @cython.locals( p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex ) @cython.locals(mid=cython.complex, deriv3=cython.complex) def split_cubic_into_two(p0, p1, p2, p3): """Split a cubic Bezier into two equal parts. Splits the curve into two equal parts at t = 0.5 Args: p0 (complex): Start point of curve. p1 (complex): First handle of curve. p2 (complex): Second handle of curve. p3 (complex): End point of curve. Returns: tuple: Two cubic Beziers (each expressed as a tuple of four complex values). 
""" mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 deriv3 = (p3 + p2 - p1 - p0) * 0.125 return ( (p0, (p0 + p1) * 0.5, mid - deriv3, mid), (mid, mid + deriv3, (p2 + p3) * 0.5, p3), ) @cython.cfunc @cython.inline @cython.locals( p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, ) @cython.locals( mid1=cython.complex, deriv1=cython.complex, mid2=cython.complex, deriv2=cython.complex, ) def split_cubic_into_three(p0, p1, p2, p3): """Split a cubic Bezier into three equal parts. Splits the curve into three equal parts at t = 1/3 and t = 2/3 Args: p0 (complex): Start point of curve. p1 (complex): First handle of curve. p2 (complex): Second handle of curve. p3 (complex): End point of curve. Returns: tuple: Three cubic Beziers (each expressed as a tuple of four complex values). """ mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27) deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27) mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27) deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27) return ( (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1), (mid1, mid1 + deriv1, mid2 - deriv2, mid2), (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3), ) @cython.cfunc @cython.inline @cython.returns(cython.complex) @cython.locals( t=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, ) @cython.locals(_p1=cython.complex, _p2=cython.complex) def cubic_approx_control(t, p0, p1, p2, p3): """Approximate a cubic Bezier using a quadratic one. Args: t (double): Position of control point. p0 (complex): Start point of curve. p1 (complex): First handle of curve. p2 (complex): Second handle of curve. p3 (complex): End point of curve. Returns: complex: Location of candidate control point on quadratic curve. """ _p1 = p0 + (p1 - p0) * 1.5 _p2 = p3 + (p2 - p3) * 1.5 return _p1 + (_p2 - _p1) * t @cython.cfunc @cython.inline @cython.returns(cython.complex) @cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) @cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double) def calc_intersect(a, b, c, d): """Calculate the intersection of two lines. Args: a (complex): Start point of first line. b (complex): End point of first line. c (complex): Start point of second line. d (complex): End point of second line. Returns: complex: Location of intersection if one present, ``complex(NaN,NaN)`` if no intersection was found. """ ab = b - a cd = d - c p = ab * 1j try: h = dot(p, a - c) / dot(p, cd) except ZeroDivisionError: return complex(NAN, NAN) return c + cd * h @cython.cfunc @cython.returns(cython.int) @cython.locals( tolerance=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, ) @cython.locals(mid=cython.complex, deriv3=cython.complex) def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance): """Check if a cubic Bezier lies within a given distance of the origin. "Origin" means *the* origin (0,0), not the start of the curve. Note that no checks are made on the start and end positions of the curve; this function only checks the inside of the curve. Args: p0 (complex): Start point of curve. p1 (complex): First handle of curve. p2 (complex): Second handle of curve. p3 (complex): End point of curve. tolerance (double): Distance from origin. Returns: bool: True if the cubic Bezier ``p`` entirely lies within a distance ``tolerance`` of the origin, False otherwise. """ # First check p2 then p1, as p2 has higher error early on. if abs(p2) <= tolerance and abs(p1) <= tolerance: return True # Split. 
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 if abs(mid) > tolerance: return False deriv3 = (p3 + p2 - p1 - p0) * 0.125 return cubic_farthest_fit_inside( p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance) @cython.cfunc @cython.inline @cython.locals(tolerance=cython.double) @cython.locals( q1=cython.complex, c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex, ) def cubic_approx_quadratic(cubic, tolerance): """Approximate a cubic Bezier with a single quadratic within a given tolerance. Args: cubic (sequence): Four complex numbers representing control points of the cubic Bezier curve. tolerance (double): Permitted deviation from the original curve. Returns: Three complex numbers representing control points of the quadratic curve if it fits within the given tolerance, or ``None`` if no suitable curve could be calculated. """ q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3]) if math.isnan(q1.imag): return None c0 = cubic[0] c3 = cubic[3] c1 = c0 + (q1 - c0) * (2 / 3) c2 = c3 + (q1 - c3) * (2 / 3) if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance): return None return c0, q1, c3 @cython.cfunc @cython.locals(n=cython.int, tolerance=cython.double) @cython.locals(i=cython.int) @cython.locals(all_quadratic=cython.int) @cython.locals( c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex ) @cython.locals( q0=cython.complex, q1=cython.complex, next_q1=cython.complex, q2=cython.complex, d1=cython.complex, ) def cubic_approx_spline(cubic, n, tolerance, all_quadratic): """Approximate a cubic Bezier curve with a spline of n quadratics. Args: cubic (sequence): Four complex numbers representing control points of the cubic Bezier curve. n (int): Number of quadratic Bezier curves in the spline. tolerance (double): Permitted deviation from the original curve. Returns: A list of ``n+2`` complex numbers, representing control points of the quadratic spline if it fits within the given tolerance, or ``None`` if no suitable spline could be calculated. """ if n == 1: return cubic_approx_quadratic(cubic, tolerance) if n == 2 and not all_quadratic: return cubic cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n) # calculate the spline of quadratics and check errors at the same time. next_cubic = next(cubics) next_q1 = cubic_approx_control( 0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] ) q2 = cubic[0] d1 = 0j spline = [cubic[0], next_q1] for i in range(1, n + 1): # Current cubic to convert c0, c1, c2, c3 = next_cubic # Current quadratic approximation of current cubic q0 = q2 q1 = next_q1 if i < n: next_cubic = next(cubics) next_q1 = cubic_approx_control( i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3] ) spline.append(next_q1) q2 = (q1 + next_q1) * 0.5 else: q2 = c3 # End-point deltas d0 = d1 d1 = q2 - c3 if abs(d1) > tolerance or not cubic_farthest_fit_inside( d0, q0 + (q1 - q0) * (2 / 3) - c1, q2 + (q1 - q2) * (2 / 3) - c2, d1, tolerance, ): return None spline.append(cubic[3]) return spline @cython.locals(max_err=cython.double) @cython.locals(n=cython.int) @cython.locals(all_quadratic=cython.int) def curve_to_quadratic(curve, max_err, all_quadratic=True): """Approximate a cubic Bezier curve with a spline of n quadratics. Args: curve (sequence): Four 2D tuples representing control points of the cubic Bezier curve. max_err (double): Permitted deviation from the original curve.
all_quadratic (bool): If True (default) returned value is a quadratic spline. If False, it's either a single quadratic curve or a single cubic curve. Returns: If all_quadratic is True: A list of 2D tuples, representing control points of the quadratic spline if it fits within the given tolerance, or ``None`` if no suitable spline could be calculated. If all_quadratic is False: Either a quadratic curve (if length of output is 3), or a cubic curve (if length of output is 4). """ curve = [complex(*p) for p in curve] for n in range(1, MAX_N + 1): spline = cubic_approx_spline(curve, n, max_err, all_quadratic) if spline is not None: # done. go home return [(s.real, s.imag) for s in spline] raise ApproxNotFoundError(curve) @cython.locals(l=cython.int, last_i=cython.int, i=cython.int) @cython.locals(all_quadratic=cython.int) def curves_to_quadratic(curves, max_errors, all_quadratic=True): """Return quadratic Bezier splines approximating the input cubic Beziers. Args: curves: A sequence of *n* curves, each curve being a sequence of four 2D tuples. max_errors: A sequence of *n* floats representing the maximum permissible deviation from each of the cubic Bezier curves. all_quadratic (bool): If True (default) returned values are a quadratic spline. If False, they are either a single quadratic curve or a single cubic curve. Example:: >>> curves_to_quadratic( [ ... [ (50,50), (100,100), (150,100), (200,50) ], ... [ (75,50), (120,100), (150,75), (200,60) ] ... ], [1,1] ) [[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]] The returned splines have "implied oncurve points" suitable for use in TrueType ``glyf`` outlines - i.e. in the first spline returned above, the first quadratic segment runs from (50,50) to ( (75 + 125)/2 , (75 + 91.666..)/2 ) = (100, 83.333...). Returns: If all_quadratic is True, a list of splines, each spline being a list of 2D tuples. If all_quadratic is False, a list of curves, each curve being a quadratic (length 3), or cubic (length 4). Raises: fontTools.cu2qu.errors.ApproxNotFoundError: if no suitable approximation can be found for all curves with the given parameters. """ curves = [[complex(*p) for p in curve] for curve in curves] assert len(max_errors) == len(curves) l = len(curves) splines = [None] * l last_i = i = 0 n = 1 while True: spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic) if spline is None: if n == MAX_N: break n += 1 last_i = i continue splines[i] = spline i = (i + 1) % l if i == last_i: # done. go home return [[(s.real, s.imag) for s in spline] for spline in splines] raise ApproxNotFoundError(curves) PKaZZZ�� � fontTools/cu2qu/errors.py# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
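# (Editorial sketch, not part of the original archive: driving the public
# cu2qu conversion API defined in cu2qu.py above.)
from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic

# A single cubic converted on its own, allowing 1 font unit of error; the
# resulting quadratic spline keeps the cubic's endpoints.
cubic = [(50, 50), (100, 100), (150, 100), (200, 50)]
quad = curve_to_quadratic(cubic, 1.0)
assert quad[0] == (50.0, 50.0) and quad[-1] == (200.0, 50.0)

# Two cubics converted together come back with the same number of points,
# which is what keeps interpolating masters compatible.
a, b = curves_to_quadratic(
    [cubic, [(75, 50), (120, 100), (150, 75), (200, 60)]], [1, 1]
)
assert len(a) == len(b)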
class Error(Exception): """Base Cu2Qu exception class for all other errors.""" class ApproxNotFoundError(Error): def __init__(self, curve): message = "no approximation found: %s" % curve super().__init__(message) self.curve = curve class UnequalZipLengthsError(Error): pass class IncompatibleGlyphsError(Error): def __init__(self, glyphs): assert len(glyphs) > 1 self.glyphs = glyphs names = set(repr(g.name) for g in glyphs) if len(names) > 1: self.combined_name = "{%s}" % ", ".join(sorted(names)) else: self.combined_name = names.pop() def __repr__(self): return "<%s %s>" % (type(self).__name__, self.combined_name) class IncompatibleSegmentNumberError(IncompatibleGlyphsError): def __str__(self): return "Glyphs named %s have a different number of segments" % ( self.combined_name ) class IncompatibleSegmentTypesError(IncompatibleGlyphsError): def __init__(self, glyphs, segments): IncompatibleGlyphsError.__init__(self, glyphs) self.segments = segments def __str__(self): lines = [] ndigits = len(str(max(self.segments))) for i, tags in sorted(self.segments.items()): lines.append( "%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags)) ) return "Glyphs named %s have incompatible segment types:\n %s" % ( self.combined_name, "\n ".join(lines), ) class IncompatibleFontsError(Error): def __init__(self, glyph_errors): self.glyph_errors = glyph_errors def __str__(self): return "fonts contain incompatible glyphs: %s" % ( ", ".join(repr(g) for g in sorted(self.glyph_errors.keys())) ) PKaZZZ<T �..fontTools/cu2qu/ufo.py# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converts cubic Bezier curves to quadratic splines. Conversion is performed such that the quadratic splines keep the same end-curve tangents as the original cubics. The approach is iterative, increasing the number of segments for a spline until the error gets below a bound. Respective curves from multiple fonts will be converted at once to ensure that the resulting splines are interpolation-compatible. """ import logging from fontTools.pens.basePen import AbstractPen from fontTools.pens.pointPen import PointToSegmentPen from fontTools.pens.reverseContourPen import ReverseContourPen from . import curves_to_quadratic from .errors import ( UnequalZipLengthsError, IncompatibleSegmentNumberError, IncompatibleSegmentTypesError, IncompatibleGlyphsError, IncompatibleFontsError, ) __all__ = ["fonts_to_quadratic", "font_to_quadratic"] # The default approximation error below is a relative value (1/1000 of the EM square). # Later on, we convert it to absolute font units by multiplying it by a font's UPEM # (see fonts_to_quadratic). DEFAULT_MAX_ERR = 0.001 CURVE_TYPE_LIB_KEY = "com.github.googlei18n.cu2qu.curve_type" logger = logging.getLogger(__name__) _zip = zip def zip(*args): """Ensure each argument to zip has the same length. Also make sure a list is returned for python 2/3 compatibility.
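    Example (editorial addition, doctest-style):

        >>> zip([1, 2], [3, 4])
        [(1, 3), (2, 4)]
        >>> zip([1, 2], [3])
        Traceback (most recent call last):
        ...
        fontTools.cu2qu.errors.UnequalZipLengthsError: ([1, 2], [3])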
""" if len(set(len(a) for a in args)) != 1: raise UnequalZipLengthsError(*args) return list(_zip(*args)) class GetSegmentsPen(AbstractPen): """Pen to collect segments into lists of points for conversion. Curves always include their initial on-curve point, so some points are duplicated between segments. """ def __init__(self): self._last_pt = None self.segments = [] def _add_segment(self, tag, *args): if tag in ["move", "line", "qcurve", "curve"]: self._last_pt = args[-1] self.segments.append((tag, args)) def moveTo(self, pt): self._add_segment("move", pt) def lineTo(self, pt): self._add_segment("line", pt) def qCurveTo(self, *points): self._add_segment("qcurve", self._last_pt, *points) def curveTo(self, *points): self._add_segment("curve", self._last_pt, *points) def closePath(self): self._add_segment("close") def endPath(self): self._add_segment("end") def addComponent(self, glyphName, transformation): pass def _get_segments(glyph): """Get a glyph's segments as extracted by GetSegmentsPen.""" pen = GetSegmentsPen() # glyph.draw(pen) # We can't simply draw the glyph with the pen, but we must initialize the # PointToSegmentPen explicitly with outputImpliedClosingLine=True. # By default PointToSegmentPen does not outputImpliedClosingLine -- unless # last and first point on closed contour are duplicated. Because we are # converting multiple glyphs at the same time, we want to make sure # this function returns the same number of segments, whether or not # the last and first point overlap. # https://github.com/googlefonts/fontmake/issues/572 # https://github.com/fonttools/fonttools/pull/1720 pointPen = PointToSegmentPen(pen, outputImpliedClosingLine=True) glyph.drawPoints(pointPen) return pen.segments def _set_segments(glyph, segments, reverse_direction): """Draw segments as extracted by GetSegmentsPen back to a glyph.""" glyph.clearContours() pen = glyph.getPen() if reverse_direction: pen = ReverseContourPen(pen) for tag, args in segments: if tag == "move": pen.moveTo(*args) elif tag == "line": pen.lineTo(*args) elif tag == "curve": pen.curveTo(*args[1:]) elif tag == "qcurve": pen.qCurveTo(*args[1:]) elif tag == "close": pen.closePath() elif tag == "end": pen.endPath() else: raise AssertionError('Unhandled segment type "%s"' % tag) def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True): """Return quadratic approximations of cubic segments.""" assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert" new_points = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic) n = len(new_points[0]) assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly" spline_length = str(n - 2) stats[spline_length] = stats.get(spline_length, 0) + 1 if all_quadratic or n == 3: return [("qcurve", p) for p in new_points] else: return [("curve", p) for p in new_points] def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True): """Do the actual conversion of a set of compatible glyphs, after arguments have been set up. Return True if the glyphs were modified, else return False. 
""" try: segments_by_location = zip(*[_get_segments(g) for g in glyphs]) except UnequalZipLengthsError: raise IncompatibleSegmentNumberError(glyphs) if not any(segments_by_location): return False # always modify input glyphs if reverse_direction is True glyphs_modified = reverse_direction new_segments_by_location = [] incompatible = {} for i, segments in enumerate(segments_by_location): tag = segments[0][0] if not all(s[0] == tag for s in segments[1:]): incompatible[i] = [s[0] for s in segments] elif tag == "curve": new_segments = _segments_to_quadratic( segments, max_err, stats, all_quadratic ) if all_quadratic or new_segments != segments: glyphs_modified = True segments = new_segments new_segments_by_location.append(segments) if glyphs_modified: new_segments_by_glyph = zip(*new_segments_by_location) for glyph, new_segments in zip(glyphs, new_segments_by_glyph): _set_segments(glyph, new_segments, reverse_direction) if incompatible: raise IncompatibleSegmentTypesError(glyphs, segments=incompatible) return glyphs_modified def glyphs_to_quadratic( glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True ): """Convert the curves of a set of compatible of glyphs to quadratic. All curves will be converted to quadratic at once, ensuring interpolation compatibility. If this is not required, calling glyphs_to_quadratic with one glyph at a time may yield slightly more optimized results. Return True if glyphs were modified, else return False. Raises IncompatibleGlyphsError if glyphs have non-interpolatable outlines. """ if stats is None: stats = {} if not max_err: # assume 1000 is the default UPEM max_err = DEFAULT_MAX_ERR * 1000 if isinstance(max_err, (list, tuple)): max_errors = max_err else: max_errors = [max_err] * len(glyphs) assert len(max_errors) == len(glyphs) return _glyphs_to_quadratic( glyphs, max_errors, reverse_direction, stats, all_quadratic ) def fonts_to_quadratic( fonts, max_err_em=None, max_err=None, reverse_direction=False, stats=None, dump_stats=False, remember_curve_type=True, all_quadratic=True, ): """Convert the curves of a collection of fonts to quadratic. All curves will be converted to quadratic at once, ensuring interpolation compatibility. If this is not required, calling fonts_to_quadratic with one font at a time may yield slightly more optimized results. Return the set of modified glyph names if any, else return an empty set. By default, cu2qu stores the curve type in the fonts' lib, under a private key "com.github.googlei18n.cu2qu.curve_type", and will not try to convert them again if the curve type is already set to "quadratic". Setting 'remember_curve_type' to False disables this optimization. Raises IncompatibleFontsError if same-named glyphs from different fonts have non-interpolatable outlines. 
""" if remember_curve_type: curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts} if len(curve_types) == 1: curve_type = next(iter(curve_types)) if curve_type in ("quadratic", "mixed"): logger.info("Curves already converted to quadratic") return False elif curve_type == "cubic": pass # keep converting else: raise NotImplementedError(curve_type) elif len(curve_types) > 1: # going to crash later if they do differ logger.warning("fonts may contain different curve types") if stats is None: stats = {} if max_err_em and max_err: raise TypeError("Only one of max_err and max_err_em can be specified.") if not (max_err_em or max_err): max_err_em = DEFAULT_MAX_ERR if isinstance(max_err, (list, tuple)): assert len(max_err) == len(fonts) max_errors = max_err elif max_err: max_errors = [max_err] * len(fonts) if isinstance(max_err_em, (list, tuple)): assert len(fonts) == len(max_err_em) max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)] elif max_err_em: max_errors = [f.info.unitsPerEm * max_err_em for f in fonts] modified = set() glyph_errors = {} for name in set().union(*(f.keys() for f in fonts)): glyphs = [] cur_max_errors = [] for font, error in zip(fonts, max_errors): if name in font: glyphs.append(font[name]) cur_max_errors.append(error) try: if _glyphs_to_quadratic( glyphs, cur_max_errors, reverse_direction, stats, all_quadratic ): modified.add(name) except IncompatibleGlyphsError as exc: logger.error(exc) glyph_errors[name] = exc if glyph_errors: raise IncompatibleFontsError(glyph_errors) if modified and dump_stats: spline_lengths = sorted(stats.keys()) logger.info( "New spline lengths: %s" % (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths)) ) if remember_curve_type: for font in fonts: curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic") new_curve_type = "quadratic" if all_quadratic else "mixed" if curve_type != new_curve_type: font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type return modified def glyph_to_quadratic(glyph, **kwargs): """Convenience wrapper around glyphs_to_quadratic, for just one glyph. Return True if the glyph was modified, else return False. """ return glyphs_to_quadratic([glyph], **kwargs) def font_to_quadratic(font, **kwargs): """Convenience wrapper around fonts_to_quadratic, for just one font. Return the set of modified glyph names if any, else return empty set. 
""" return fonts_to_quadratic([font], **kwargs) PKaZZZ�vl�����$fontTools/designspaceLib/__init__.pyfrom __future__ import annotations import collections import copy import itertools import math import os import posixpath from io import BytesIO, StringIO from textwrap import indent from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union, cast from fontTools.misc import etree as ET from fontTools.misc import plistlib from fontTools.misc.loggingTools import LogMixin from fontTools.misc.textTools import tobytes, tostr """ designSpaceDocument - read and write designspace files """ __all__ = [ "AxisDescriptor", "AxisLabelDescriptor", "AxisMappingDescriptor", "BaseDocReader", "BaseDocWriter", "DesignSpaceDocument", "DesignSpaceDocumentError", "DiscreteAxisDescriptor", "InstanceDescriptor", "LocationLabelDescriptor", "RangeAxisSubsetDescriptor", "RuleDescriptor", "SourceDescriptor", "ValueAxisSubsetDescriptor", "VariableFontDescriptor", ] # ElementTree allows to find namespace-prefixed elements, but not attributes # so we have to do it ourselves for 'xml:lang' XML_NS = "{http://www.w3.org/XML/1998/namespace}" XML_LANG = XML_NS + "lang" def posix(path): """Normalize paths using forward slash to work also on Windows.""" new_path = posixpath.join(*path.split(os.path.sep)) if path.startswith("/"): # The above transformation loses absolute paths new_path = "/" + new_path elif path.startswith(r"\\"): # The above transformation loses leading slashes of UNC path mounts new_path = "//" + new_path return new_path def posixpath_property(private_name): """Generate a propery that holds a path always using forward slashes.""" def getter(self): # Normal getter return getattr(self, private_name) def setter(self, value): # The setter rewrites paths using forward slashes if value is not None: value = posix(value) setattr(self, private_name, value) return property(getter, setter) class DesignSpaceDocumentError(Exception): def __init__(self, msg, obj=None): self.msg = msg self.obj = obj def __str__(self): return str(self.msg) + (": %r" % self.obj if self.obj is not None else "") class AsDictMixin(object): def asdict(self): d = {} for attr, value in self.__dict__.items(): if attr.startswith("_"): continue if hasattr(value, "asdict"): value = value.asdict() elif isinstance(value, list): value = [v.asdict() if hasattr(v, "asdict") else v for v in value] d[attr] = value return d class SimpleDescriptor(AsDictMixin): """Containers for a bunch of attributes""" # XXX this is ugly. The 'print' is inappropriate here, and instead of # assert, it should simply return True/False def compare(self, other): # test if this object contains the same data as the other for attr in self._attrs: try: assert getattr(self, attr) == getattr(other, attr) except AssertionError: print( "failed attribute", attr, getattr(self, attr), "!=", getattr(other, attr), ) def __repr__(self): attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs] attrs = indent("\n".join(attrs), " ") return f"{self.__class__.__name__}(\n{attrs}\n)" class SourceDescriptor(SimpleDescriptor): """Simple container for data related to the source .. 
code:: python doc = DesignSpaceDocument() s1 = SourceDescriptor() s1.path = masterPath1 s1.name = "master.ufo1" s1.font = defcon.Font("master.ufo1") s1.location = dict(weight=0) s1.familyName = "MasterFamilyName" s1.styleName = "MasterStyleNameOne" s1.localisedFamilyName = dict(fr="Caractère") s1.mutedGlyphNames.append("A") s1.mutedGlyphNames.append("Z") doc.addSource(s1) """ flavor = "source" _attrs = [ "filename", "path", "name", "layerName", "location", "copyLib", "copyGroups", "copyFeatures", "muteKerning", "muteInfo", "mutedGlyphNames", "familyName", "styleName", "localisedFamilyName", ] filename = posixpath_property("_filename") path = posixpath_property("_path") def __init__( self, *, filename=None, path=None, font=None, name=None, location=None, designLocation=None, layerName=None, familyName=None, styleName=None, localisedFamilyName=None, copyLib=False, copyInfo=False, copyGroups=False, copyFeatures=False, muteKerning=False, muteInfo=False, mutedGlyphNames=None, ): self.filename = filename """string. A relative path to the source file, **as it is in the document**. MutatorMath + VarLib. """ self.path = path """The absolute path, calculated from filename.""" self.font = font """Any Python object. Optional. Points to a representation of this source font that is loaded in memory, as a Python object (e.g. a ``defcon.Font`` or a ``fontTools.ttFont.TTFont``). The default document reader will not fill in this attribute, and the default writer will not use this attribute. It is up to the user of ``designspaceLib`` to either load the resource identified by ``filename`` and store it in this field, or write the contents of this field to the disk and make ``filename`` point to that. """ self.name = name """string. Optional. Unique identifier name for this source. MutatorMath + varLib. """ self.designLocation = ( designLocation if designLocation is not None else location or {} ) """dict. Axis values for this source, in design space coordinates. MutatorMath + varLib. This may be only part of the full design location. See :meth:`getFullDesignLocation()` .. versionadded:: 5.0 """ self.layerName = layerName """string. The name of the layer in the source to look for outline data. Default is ``None``, which means ``foreground``. """ self.familyName = familyName """string. Family name of this source. Though this data can be extracted from the font, it can be efficient to have it right here. varLib. """ self.styleName = styleName """string. Style name of this source. Though this data can be extracted from the font, it can be efficient to have it right here. varLib. """ self.localisedFamilyName = localisedFamilyName or {} """dict. A dictionary of localised family name strings, keyed by language code. If present, will be used to build localized names for all instances. .. versionadded:: 5.0 """ self.copyLib = copyLib """bool. Indicates if the contents of the font.lib need to be copied to the instances. MutatorMath. .. deprecated:: 5.0 """ self.copyInfo = copyInfo """bool. Indicates if the non-interpolating font.info needs to be copied to the instances. MutatorMath. .. deprecated:: 5.0 """ self.copyGroups = copyGroups """bool. Indicates if the groups need to be copied to the instances. MutatorMath. .. deprecated:: 5.0 """ self.copyFeatures = copyFeatures """bool. Indicates if the feature text needs to be copied to the instances. MutatorMath. .. deprecated:: 5.0 """ self.muteKerning = muteKerning """bool. Indicates if the kerning data from this source needs to be muted (i.e. not be part of the calculations).
MutatorMath only. """ self.muteInfo = muteInfo """bool. Indicates if the interpolating font.info data for this source needs to be muted. MutatorMath only. """ self.mutedGlyphNames = mutedGlyphNames or [] """list. Glyph names that need to be muted in the instances. MutatorMath only. """ @property def location(self): """dict. Axis values for this source, in design space coordinates. MutatorMath + varLib. .. deprecated:: 5.0 Use the more explicit alias for this property :attr:`designLocation`. """ return self.designLocation @location.setter def location(self, location: Optional[SimpleLocationDict]): self.designLocation = location or {} def setFamilyName(self, familyName, languageCode="en"): """Setter for :attr:`localisedFamilyName` .. versionadded:: 5.0 """ self.localisedFamilyName[languageCode] = tostr(familyName) def getFamilyName(self, languageCode="en"): """Getter for :attr:`localisedFamilyName` .. versionadded:: 5.0 """ return self.localisedFamilyName.get(languageCode) def getFullDesignLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict: """Get the complete design location of this source, from its :attr:`designLocation` and the document's axis defaults. .. versionadded:: 5.0 """ result: SimpleLocationDict = {} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] else: result[axis.name] = axis.map_forward(axis.default) return result class RuleDescriptor(SimpleDescriptor): """Represents the rule descriptor element: a set of glyph substitutions to trigger conditionally in some parts of the designspace. .. code:: python r1 = RuleDescriptor() r1.name = "unique.rule.name" r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)]) r1.conditionSets.append([dict(...), dict(...)]) r1.subs.append(("a", "a.alt")) .. code:: xml <!-- optional: list of substitution rules --> <rules> <rule name="vertical.bars"> <conditionset> <condition minimum="250.000000" maximum="750.000000" name="weight"/> <condition minimum="100" name="width"/> <condition minimum="10" maximum="40" name="optical"/> </conditionset> <sub name="cent" with="cent.alt"/> <sub name="dollar" with="dollar.alt"/> </rule> </rules> """ _attrs = ["name", "conditionSets", "subs"] # what do we need here def __init__(self, *, name=None, conditionSets=None, subs=None): self.name = name """string. Unique name for this rule. Can be used to reference this rule data.""" # list of lists of dict(name='aaaa', minimum=0, maximum=1000) self.conditionSets = conditionSets or [] """a list of conditionsets. - Each conditionset is a list of conditions. - Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys. """ # list of substitutions stored as tuples of glyphnames ("a", "a.alt") self.subs = subs or [] """list of substitutions. - Each substitution is stored as a tuple of glyph names, e.g. ("a", "a.alt"). - Note: By default, rules are applied first, before other text shaping/OpenType layout, as they are part of the `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_. See :ref:`rules-element` § Attributes. """ def evaluateRule(rule, location): """Return True if any of the rule's conditionsets matches the given location.""" return any(evaluateConditions(c, location) for c in rule.conditionSets) def evaluateConditions(conditions, location): """Return True if all the conditions match the given location. - If a condition has no minimum, check for <= maximum.
- If a condition has no maximum, check for >= minimum. """ for cd in conditions: value = location[cd["name"]] if cd.get("minimum") is None: if value > cd["maximum"]: return False elif cd.get("maximum") is None: if cd["minimum"] > value: return False elif not cd["minimum"] <= value <= cd["maximum"]: return False return True def processRules(rules, location, glyphNames): """Apply these rules at this location to these glyph names. Return a new list of glyphNames with substitutions applied. - rule order matters """ newNames = [] for rule in rules: if evaluateRule(rule, location): for name in glyphNames: swap = False for a, b in rule.subs: if name == a: swap = True break if swap: newNames.append(b) else: newNames.append(name) glyphNames = newNames newNames = [] return glyphNames AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]] SimpleLocationDict = Dict[str, float] class AxisMappingDescriptor(SimpleDescriptor): """Represents the axis mapping element: mapping an input location to an output location in the designspace. .. code:: python m1 = AxisMappingDescriptor() m1.inputLocation = {"weight": 900, "width": 150} m1.outputLocation = {"weight": 870} .. code:: xml <mappings> <mapping> <input> <dimension name="weight" xvalue="900"/> <dimension name="width" xvalue="150"/> </input> <output> <dimension name="weight" xvalue="870"/> </output> </mapping> </mappings> """ _attrs = ["inputLocation", "outputLocation"] def __init__( self, *, inputLocation=None, outputLocation=None, description=None, groupDescription=None, ): self.inputLocation: SimpleLocationDict = inputLocation or {} """dict. Axis values for the input of the mapping, in design space coordinates. varLib. .. versionadded:: 5.1 """ self.outputLocation: SimpleLocationDict = outputLocation or {} """dict. Axis values for the output of the mapping, in design space coordinates. varLib. .. versionadded:: 5.1 """ self.description = description """string. A description of the mapping. varLib. .. versionadded:: 5.2 """ self.groupDescription = groupDescription """string. A description of the group of mappings. varLib. .. versionadded:: 5.2 """ class InstanceDescriptor(SimpleDescriptor): """Simple container for data related to the instance ..
code:: python i2 = InstanceDescriptor() i2.path = instancePath2 i2.familyName = "InstanceFamilyName" i2.styleName = "InstanceStyleName" i2.name = "instance.ufo2" # anisotropic location i2.designLocation = dict(weight=500, width=(400,300)) i2.postScriptFontName = "InstancePostscriptName" i2.styleMapFamilyName = "InstanceStyleMapFamilyName" i2.styleMapStyleName = "InstanceStyleMapStyleName" i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever' doc.addInstance(i2) """ flavor = "instance" _defaultLanguageCode = "en" _attrs = [ "filename", "path", "name", "locationLabel", "designLocation", "userLocation", "familyName", "styleName", "postScriptFontName", "styleMapFamilyName", "styleMapStyleName", "localisedFamilyName", "localisedStyleName", "localisedStyleMapFamilyName", "localisedStyleMapStyleName", "glyphs", "kerning", "info", "lib", ] filename = posixpath_property("_filename") path = posixpath_property("_path") def __init__( self, *, filename=None, path=None, font=None, name=None, location=None, locationLabel=None, designLocation=None, userLocation=None, familyName=None, styleName=None, postScriptFontName=None, styleMapFamilyName=None, styleMapStyleName=None, localisedFamilyName=None, localisedStyleName=None, localisedStyleMapFamilyName=None, localisedStyleMapStyleName=None, glyphs=None, kerning=True, info=True, lib=None, ): self.filename = filename """string. Relative path to the instance file, **as it is in the document**. The file may or may not exist. MutatorMath + VarLib. """ self.path = path """string. Absolute path to the instance file, calculated from the document path and the string in the filename attr. The file may or may not exist. MutatorMath. """ self.font = font """Same as :attr:`SourceDescriptor.font` .. seealso:: :attr:`SourceDescriptor.font` """ self.name = name """string. Unique identifier name of the instance, used to identify it if it needs to be referenced from elsewhere in the document. """ self.locationLabel = locationLabel """Name of a :class:`LocationLabelDescriptor`. If provided, the instance should have the same location as the LocationLabel. .. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 """ self.designLocation: AnisotropicLocationDict = ( designLocation if designLocation is not None else (location or {}) ) """dict. Axis values for this instance, in design space coordinates. MutatorMath + varLib. .. seealso:: This may be only part of the full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 """ self.userLocation: SimpleLocationDict = userLocation or {} """dict. Axis values for this instance, in user space coordinates. MutatorMath + varLib. .. seealso:: This may be only part of the full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 """ self.familyName = familyName """string. Family name of this instance. MutatorMath + varLib. """ self.styleName = styleName """string. Style name of this instance. MutatorMath + varLib. """ self.postScriptFontName = postScriptFontName """string. Postscript fontname for this instance. MutatorMath + varLib. """ self.styleMapFamilyName = styleMapFamilyName """string. StyleMap familyname for this instance. MutatorMath + varLib. """ self.styleMapStyleName = styleMapStyleName """string. StyleMap stylename for this instance. MutatorMath + varLib. """ self.localisedFamilyName = localisedFamilyName or {} """dict. A dictionary of localised family name strings, keyed by language code. 
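For example (illustrative): ``{"fr": "Caractère"}``.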
""" self.localisedStyleName = localisedStyleName or {} """dict. A dictionary of localised stylename strings, keyed by language code. """ self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {} """A dictionary of localised style map familyname strings, keyed by language code. """ self.localisedStyleMapStyleName = localisedStyleMapStyleName or {} """A dictionary of localised style map stylename strings, keyed by language code. """ self.glyphs = glyphs or {} """dict for special master definitions for glyphs. If glyphs need special masters (to record the results of executed rules for example). MutatorMath. .. deprecated:: 5.0 Use rules or sparse sources instead. """ self.kerning = kerning """ bool. Indicates if this instance needs its kerning calculated. MutatorMath. .. deprecated:: 5.0 """ self.info = info """bool. Indicated if this instance needs the interpolating font.info calculated. .. deprecated:: 5.0 """ self.lib = lib or {} """Custom data associated with this instance.""" @property def location(self): """dict. Axis values for this instance. MutatorMath + varLib. .. deprecated:: 5.0 Use the more explicit alias for this property :attr:`designLocation`. """ return self.designLocation @location.setter def location(self, location: Optional[AnisotropicLocationDict]): self.designLocation = location or {} def setStyleName(self, styleName, languageCode="en"): """These methods give easier access to the localised names.""" self.localisedStyleName[languageCode] = tostr(styleName) def getStyleName(self, languageCode="en"): return self.localisedStyleName.get(languageCode) def setFamilyName(self, familyName, languageCode="en"): self.localisedFamilyName[languageCode] = tostr(familyName) def getFamilyName(self, languageCode="en"): return self.localisedFamilyName.get(languageCode) def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"): self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName) def getStyleMapStyleName(self, languageCode="en"): return self.localisedStyleMapStyleName.get(languageCode) def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"): self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName) def getStyleMapFamilyName(self, languageCode="en"): return self.localisedStyleMapFamilyName.get(languageCode) def clearLocation(self, axisName: Optional[str] = None): """Clear all location-related fields. Ensures that :attr:``designLocation`` and :attr:``userLocation`` are dictionaries (possibly empty if clearing everything). In order to update the location of this instance wholesale, a user should first clear all the fields, then change the field(s) for which they have data. .. code:: python instance.clearLocation() instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} instance.userLocation = {'Opsz': 16} In order to update a single axis location, the user should only clear that axis, then edit the values: .. code:: python instance.clearLocation('Weight') instance.designLocation['Weight'] = (34, 36.5) Args: axisName: if provided, only clear the location for that axis. .. 
versionadded:: 5.0 """ self.locationLabel = None if axisName is None: self.designLocation = {} self.userLocation = {} else: if self.designLocation is None: self.designLocation = {} if axisName in self.designLocation: del self.designLocation[axisName] if self.userLocation is None: self.userLocation = {} if axisName in self.userLocation: del self.userLocation[axisName] def getLocationLabelDescriptor( self, doc: "DesignSpaceDocument" ) -> Optional[LocationLabelDescriptor]: """Get the :class:`LocationLabelDescriptor` instance that matches this instances's :attr:`locationLabel`. Raises if the named label can't be found. .. versionadded:: 5.0 """ if self.locationLabel is None: return None label = doc.getLocationLabel(self.locationLabel) if label is None: raise DesignSpaceDocumentError( "InstanceDescriptor.getLocationLabelDescriptor(): " f"unknown location label `{self.locationLabel}` in instance `{self.name}`." ) return label def getFullDesignLocation( self, doc: "DesignSpaceDocument" ) -> AnisotropicLocationDict: """Get the complete design location of this instance, by combining data from the various location fields, default axis values and mappings, and top-level location labels. The source of truth for this instance's location is determined for each axis independently by taking the first not-None field in this list: - ``locationLabel``: the location along this axis is the same as the matching STAT format 4 label. No anisotropy. - ``designLocation[axisName]``: the explicit design location along this axis, possibly anisotropic. - ``userLocation[axisName]``: the explicit user location along this axis. No anisotropy. - ``axis.default``: default axis value. No anisotropy. .. versionadded:: 5.0 """ label = self.getLocationLabelDescriptor(doc) if label is not None: return doc.map_forward(label.userLocation) # type: ignore result: AnisotropicLocationDict = {} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] elif axis.name in self.userLocation: result[axis.name] = axis.map_forward(self.userLocation[axis.name]) else: result[axis.name] = axis.map_forward(axis.default) return result def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict: """Get the complete user location for this instance. .. seealso:: :meth:`getFullDesignLocation` .. versionadded:: 5.0 """ return doc.map_backward(self.getFullDesignLocation(doc)) def tagForAxisName(name): # try to find or make a tag name for this axis name names = { "weight": ("wght", dict(en="Weight")), "width": ("wdth", dict(en="Width")), "optical": ("opsz", dict(en="Optical Size")), "slant": ("slnt", dict(en="Slant")), "italic": ("ital", dict(en="Italic")), } if name.lower() in names: return names[name.lower()] if len(name) < 4: tag = name + "*" * (4 - len(name)) else: tag = name[:4] return tag, dict(en=name) class AbstractAxisDescriptor(SimpleDescriptor): flavor = "axis" def __init__( self, *, tag=None, name=None, labelNames=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): # opentype tag for this axis self.tag = tag """string. Four letter tag for this axis. Some might be registered at the `OpenType specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__. Privately-defined axis tags must begin with an uppercase letter and use only uppercase letters or digits. """ # name of the axis used in locations self.name = name """string. Name of the axis as it is used in the location dicts. MutatorMath + varLib. 
""" # names for UI purposes, if this is not a standard axis, self.labelNames = labelNames or {} """dict. When defining a non-registered axis, it will be necessary to define user-facing readable names for the axis. Keyed by xml:lang code. Values are required to be ``unicode`` strings, even if they only contain ASCII characters. """ self.hidden = hidden """bool. Whether this axis should be hidden in user interfaces. """ self.map = map or [] """list of input / output values that can describe a warp of user space to design space coordinates. If no map values are present, it is assumed user space is the same as design space, as in [(minimum, minimum), (maximum, maximum)]. varLib. """ self.axisOrdering = axisOrdering """STAT table field ``axisOrdering``. See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_ .. versionadded:: 5.0 """ self.axisLabels: List[AxisLabelDescriptor] = axisLabels or [] """STAT table entries for Axis Value Tables format 1, 2, 3. See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_ .. versionadded:: 5.0 """ class AxisDescriptor(AbstractAxisDescriptor): """Simple container for the axis data. Add more localisations? .. code:: python a1 = AxisDescriptor() a1.minimum = 1 a1.maximum = 1000 a1.default = 400 a1.name = "weight" a1.tag = "wght" a1.labelNames['fa-IR'] = "قطر" a1.labelNames['en'] = "Wéíght" a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)] a1.axisOrdering = 1 a1.axisLabels = [ AxisLabelDescriptor(name="Regular", userValue=400, elidable=True) ] doc.addAxis(a1) """ _attrs = [ "tag", "name", "maximum", "minimum", "default", "map", "axisOrdering", "axisLabels", ] def __init__( self, *, tag=None, name=None, labelNames=None, minimum=None, default=None, maximum=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__( tag=tag, name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.minimum = minimum """number. The minimum value for this axis in user space. MutatorMath + varLib. """ self.maximum = maximum """number. The maximum value for this axis in user space. MutatorMath + varLib. """ self.default = default """number. The default value for this axis, i.e. when a new location is created, this is the value this axis will get in user space. MutatorMath + varLib. """ def serialize(self): # output to a dict, used in testing return dict( tag=self.tag, name=self.name, labelNames=self.labelNames, maximum=self.maximum, minimum=self.minimum, default=self.default, hidden=self.hidden, map=self.map, axisOrdering=self.axisOrdering, axisLabels=self.axisLabels, ) def map_forward(self, v): """Maps value from axis mapping's input (user) to output (design).""" from fontTools.varLib.models import piecewiseLinearMap if not self.map: return v return piecewiseLinearMap(v, {k: v for k, v in self.map}) def map_backward(self, v): """Maps value from axis mapping's output (design) to input (user).""" from fontTools.varLib.models import piecewiseLinearMap if isinstance(v, tuple): v = v[0] if not self.map: return v return piecewiseLinearMap(v, {v: k for k, v in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor): """Container for discrete axis data. Use this for axes that do not interpolate. The main difference from a continuous axis is that a continuous axis has a ``minimum`` and ``maximum``, while a discrete axis has a list of ``values``. 
Example: an Italic axis with 2 stops, Roman and Italic, that are not compatible. The axis still allows binding together the full font family, which is useful for the STAT table; however, it can't become a variation axis in a VF. .. code:: python a2 = DiscreteAxisDescriptor() a2.values = [0, 1] a2.default = 0 a2.name = "Italic" a2.tag = "ITAL" a2.labelNames['fr'] = "Italique" a2.map = [(0, 0), (1, -11)] a2.axisOrdering = 2 a2.axisLabels = [ AxisLabelDescriptor(name="Roman", userValue=0, elidable=True) ] doc.addAxis(a2) .. versionadded:: 5.0 """ flavor = "axis" _attrs = ("tag", "name", "values", "default", "map", "axisOrdering", "axisLabels") def __init__( self, *, tag=None, name=None, labelNames=None, values=None, default=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__( tag=tag, name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.default: float = default """The default value for this axis, i.e. when a new location is created, this is the value this axis will get in user space. However, this default value is less important than in continuous axes: - it doesn't define the "neutral" version of outlines from which deltas would apply, as this axis does not interpolate. - it doesn't provide the reference glyph set for the designspace, as fonts at each value can have different glyph sets. """ self.values: List[float] = values or [] """List of possible values for this axis. Contrary to continuous axes, only the values in this list can be taken by the axis, nothing in-between. """ def map_forward(self, value): """Maps value from axis mapping's input to output. Returns value unchanged if no mapping entry is found. Note: for discrete axes, each value must have its mapping entry, if you intend that value to be mapped. """ return next((v for k, v in self.map if k == value), value) def map_backward(self, value): """Maps value from axis mapping's output to input. Returns value unchanged if no mapping entry is found. Note: for discrete axes, each value must have its mapping entry, if you intend that value to be mapped. """ if isinstance(value, tuple): value = value[0] return next((k for k, v in self.map if v == value), value) class AxisLabelDescriptor(SimpleDescriptor): """Container for axis label data. Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3). All values are user values. See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format of the Axis value depends on which fields are filled in; see :meth:`getFormat` .. versionadded:: 5.0 """ flavor = "label" _attrs = ( "userMinimum", "userValue", "userMaximum", "name", "elidable", "olderSibling", "linkedUserValue", "labelNames", ) def __init__( self, *, name, userValue, userMinimum=None, userMaximum=None, elidable=False, olderSibling=False, linkedUserValue=None, labelNames=None, ): self.userMinimum: Optional[float] = userMinimum """STAT field ``rangeMinValue`` (format 2).""" self.userValue: float = userValue """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2).""" self.userMaximum: Optional[float] = userMaximum """STAT field ``rangeMaxValue`` (format 2).""" self.name: str = name """Label for this axis location, STAT field ``valueNameID``.""" self.elidable: bool = elidable """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ """ self.olderSibling: bool = olderSibling """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ """ self.linkedUserValue: Optional[float] = linkedUserValue """STAT field ``linkedValue`` (format 3).""" self.labelNames: MutableMapping[str, str] = labelNames or {} """User-facing translations of this location's label. Keyed by ``xml:lang`` code. """ def getFormat(self) -> int: """Determine which format of STAT Axis value to use to encode this label. =========== ========= =========== =========== =============== STAT Format userValue userMinimum userMaximum linkedUserValue =========== ========= =========== =========== =============== 1 ✅ ❌ ❌ ❌ 2 ✅ ✅ ✅ ❌ 3 ✅ ❌ ❌ ✅ =========== ========= =========== =========== =============== """ if self.linkedUserValue is not None: return 3 if self.userMinimum is not None or self.userMaximum is not None: return 2 return 1 @property def defaultName(self) -> str: """Return the English name from :attr:`labelNames` or the :attr:`name`.""" return self.labelNames.get("en") or self.name class LocationLabelDescriptor(SimpleDescriptor): """Container for location label data. Analogue of OpenType's STAT data for a free-floating location (format 4). All values are user values. See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_ .. versionadded:: 5.0 """ flavor = "label" _attrs = ("name", "elidable", "olderSibling", "userLocation", "labelNames") def __init__( self, *, name, userLocation, elidable=False, olderSibling=False, labelNames=None, ): self.name: str = name """Label for this named location, STAT field ``valueNameID``.""" self.userLocation: SimpleLocationDict = userLocation or {} """Location in user coordinates along each axis. If an axis is not mentioned, it is assumed to be at its default location. .. seealso:: This may be only part of the full location. See: :meth:`getFullUserLocation` """ self.elidable: bool = elidable """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ """ self.olderSibling: bool = olderSibling """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ """ self.labelNames: Dict[str, str] = labelNames or {} """User-facing translations of this location's label. Keyed by xml:lang code. """ @property def defaultName(self) -> str: """Return the English name from :attr:`labelNames` or the :attr:`name`.""" return self.labelNames.get("en") or self.name def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict: """Get the complete user location of this label, by combining data from the explicit user location and default axis values. .. versionadded:: 5.0 """ return { axis.name: self.userLocation.get(axis.name, axis.default) for axis in doc.axes } class VariableFontDescriptor(SimpleDescriptor): """Container for variable fonts, sub-spaces of the Designspace. Use-cases: - From a single DesignSpace with discrete axes, define 1 variable font per value on the discrete axes. Before version 5, you would have needed 1 DesignSpace per such variable font, and a lot of data duplication. 
- From a big variable font with many axes, define subsets of that variable font that only include some axes and freeze other axes at a given location. .. versionadded:: 5.0 """ flavor = "variable-font" _attrs = ("filename", "axisSubsets", "lib") filename = posixpath_property("_filename") def __init__(self, *, name, filename=None, axisSubsets=None, lib=None): self.name: str = name """string, required. Name of this variable font, used to identify it during the build process and from other parts of the document, and also as a filename in case the filename property is empty. VarLib. """ self.filename: str = filename """string, optional. Relative path to the variable font file, **as it is in the document**. The file may or may not exist. If not specified, the :attr:`name` will be used as a basename for the file. """ self.axisSubsets: List[ Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor] ] = (axisSubsets or []) """Axis subsets to include in this variable font. If an axis is not mentioned, assume that we only want the default location of that axis (same as a :class:`ValueAxisSubsetDescriptor`). """ self.lib: MutableMapping[str, Any] = lib or {} """Custom data associated with this variable font.""" class RangeAxisSubsetDescriptor(SimpleDescriptor): """Subset of a continuous axis to include in a variable font. .. versionadded:: 5.0 """ flavor = "axis-subset" _attrs = ("name", "userMinimum", "userDefault", "userMaximum") def __init__( self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf ): self.name: str = name """Name of the :class:`AxisDescriptor` to subset.""" self.userMinimum: float = userMinimum """New minimum value of the axis in the target variable font. If not specified, assume the same minimum value as the full axis. (default = ``-math.inf``) """ self.userDefault: Optional[float] = userDefault """New default value of the axis in the target variable font. If not specified, assume the same default value as the full axis. (default = ``None``) """ self.userMaximum: float = userMaximum """New maximum value of the axis in the target variable font. If not specified, assume the same maximum value as the full axis. (default = ``math.inf``) """ class ValueAxisSubsetDescriptor(SimpleDescriptor): """Single value of a discrete or continuous axis to use in a variable font. .. versionadded:: 5.0 """ flavor = "axis-subset" _attrs = ("name", "userValue") def __init__(self, *, name, userValue): self.name: str = name """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor` to "snapshot" or "freeze".
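A minimal sketch (hypothetical values)::

    ValueAxisSubsetDescriptor(name="Italic", userValue=0)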
""" self.userValue: float = userValue """Value in user coordinates at which to freeze the given axis.""" class BaseDocWriter(object): _whiteSpace = " " axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor axisMappingDescriptorClass = AxisMappingDescriptor locationLabelDescriptorClass = LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor @classmethod def getAxisDecriptor(cls): return cls.axisDescriptorClass() @classmethod def getAxisMappingDescriptor(cls): return cls.axisMappingDescriptorClass() @classmethod def getSourceDescriptor(cls): return cls.sourceDescriptorClass() @classmethod def getInstanceDescriptor(cls): return cls.instanceDescriptorClass() @classmethod def getRuleDescriptor(cls): return cls.ruleDescriptorClass() def __init__(self, documentPath, documentObject: DesignSpaceDocument): self.path = documentPath self.documentObject = documentObject self.effectiveFormatTuple = self._getEffectiveFormatTuple() self.root = ET.Element("designspace") def write(self, pretty=True, encoding="UTF-8", xml_declaration=True): self.root.attrib["format"] = ".".join(str(i) for i in self.effectiveFormatTuple) if ( self.documentObject.axes or self.documentObject.axisMappings or self.documentObject.elidedFallbackName is not None ): axesElement = ET.Element("axes") if self.documentObject.elidedFallbackName is not None: axesElement.attrib["elidedfallbackname"] = ( self.documentObject.elidedFallbackName ) self.root.append(axesElement) for axisObject in self.documentObject.axes: self._addAxis(axisObject) if self.documentObject.axisMappings: mappingsElement = None lastGroup = object() for mappingObject in self.documentObject.axisMappings: if getattr(mappingObject, "groupDescription", None) != lastGroup: if mappingsElement is not None: self.root.findall(".axes")[0].append(mappingsElement) lastGroup = getattr(mappingObject, "groupDescription", None) mappingsElement = ET.Element("mappings") if lastGroup is not None: mappingsElement.attrib["description"] = lastGroup self._addAxisMapping(mappingsElement, mappingObject) if mappingsElement is not None: self.root.findall(".axes")[0].append(mappingsElement) if self.documentObject.locationLabels: labelsElement = ET.Element("labels") for labelObject in self.documentObject.locationLabels: self._addLocationLabel(labelsElement, labelObject) self.root.append(labelsElement) if self.documentObject.rules: if getattr(self.documentObject, "rulesProcessingLast", False): attributes = {"processing": "last"} else: attributes = {} self.root.append(ET.Element("rules", attributes)) for ruleObject in self.documentObject.rules: self._addRule(ruleObject) if self.documentObject.sources: self.root.append(ET.Element("sources")) for sourceObject in self.documentObject.sources: self._addSource(sourceObject) if self.documentObject.variableFonts: variableFontsElement = ET.Element("variable-fonts") for variableFont in self.documentObject.variableFonts: self._addVariableFont(variableFontsElement, variableFont) self.root.append(variableFontsElement) if self.documentObject.instances: self.root.append(ET.Element("instances")) for instanceObject in self.documentObject.instances: self._addInstance(instanceObject) if self.documentObject.lib: 
self._addLib(self.root, self.documentObject.lib, 2) tree = ET.ElementTree(self.root) tree.write( self.path, encoding=encoding, method="xml", xml_declaration=xml_declaration, pretty_print=pretty, ) def _getEffectiveFormatTuple(self): """Try to use the version specified in the document, or a sufficiently recent version to be able to encode what the document contains. """ minVersion = self.documentObject.formatTuple if ( any( hasattr(axis, "values") or axis.axisOrdering is not None or axis.axisLabels for axis in self.documentObject.axes ) or self.documentObject.locationLabels or any(source.localisedFamilyName for source in self.documentObject.sources) or self.documentObject.variableFonts or any( instance.locationLabel or instance.userLocation for instance in self.documentObject.instances ) ): if minVersion < (5, 0): minVersion = (5, 0) if self.documentObject.axisMappings: if minVersion < (5, 1): minVersion = (5, 1) return minVersion def _makeLocationElement(self, locationObject, name=None): """Convert Location dict to a locationElement.""" locElement = ET.Element("location") if name is not None: locElement.attrib["name"] = name validatedLocation = self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items(): if axisName in validatedLocation: # only accept values we know validatedLocation[axisName] = axisValue for dimensionName, dimensionValue in validatedLocation.items(): dimElement = ET.Element("dimension") dimElement.attrib["name"] = dimensionName if type(dimensionValue) == tuple: dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue[0]) dimElement.attrib["yvalue"] = self.intOrFloat(dimensionValue[1]) else: dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue) locElement.append(dimElement) return locElement, validatedLocation def intOrFloat(self, num): if int(num) == num: return "%d" % num return ("%f" % num).rstrip("0").rstrip(".") def _addRule(self, ruleObject): # if none of the conditions have minimum or maximum values, do not add the rule. 
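# Illustrative shape of the element built below (cf. the RuleDescriptor
# docstring); names and values are examples only:
#
#   <rule name="vertical.bars">
#     <conditionset>
#       <condition minimum="250" maximum="750" name="weight"/>
#     </conditionset>
#     <sub name="cent" with="cent.alt"/>
#   </rule>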
ruleElement = ET.Element("rule") if ruleObject.name is not None: ruleElement.attrib["name"] = ruleObject.name for conditions in ruleObject.conditionSets: conditionsetElement = ET.Element("conditionset") for cond in conditions: if cond.get("minimum") is None and cond.get("maximum") is None: # neither is defined, don't add this condition continue conditionElement = ET.Element("condition") conditionElement.attrib["name"] = cond.get("name") if cond.get("minimum") is not None: conditionElement.attrib["minimum"] = self.intOrFloat( cond.get("minimum") ) if cond.get("maximum") is not None: conditionElement.attrib["maximum"] = self.intOrFloat( cond.get("maximum") ) conditionsetElement.append(conditionElement) if len(conditionsetElement): ruleElement.append(conditionsetElement) for sub in ruleObject.subs: subElement = ET.Element("sub") subElement.attrib["name"] = sub[0] subElement.attrib["with"] = sub[1] ruleElement.append(subElement) if len(ruleElement): self.root.findall(".rules")[0].append(ruleElement) def _addAxis(self, axisObject): axisElement = ET.Element("axis") axisElement.attrib["tag"] = axisObject.tag axisElement.attrib["name"] = axisObject.name self._addLabelNames(axisElement, axisObject.labelNames) if axisObject.map: for inputValue, outputValue in axisObject.map: mapElement = ET.Element("map") mapElement.attrib["input"] = self.intOrFloat(inputValue) mapElement.attrib["output"] = self.intOrFloat(outputValue) axisElement.append(mapElement) if axisObject.axisOrdering or axisObject.axisLabels: labelsElement = ET.Element("labels") if axisObject.axisOrdering is not None: labelsElement.attrib["ordering"] = str(axisObject.axisOrdering) for label in axisObject.axisLabels: self._addAxisLabel(labelsElement, label) axisElement.append(labelsElement) if hasattr(axisObject, "minimum"): axisElement.attrib["minimum"] = self.intOrFloat(axisObject.minimum) axisElement.attrib["maximum"] = self.intOrFloat(axisObject.maximum) elif hasattr(axisObject, "values"): axisElement.attrib["values"] = " ".join( self.intOrFloat(v) for v in axisObject.values ) axisElement.attrib["default"] = self.intOrFloat(axisObject.default) if axisObject.hidden: axisElement.attrib["hidden"] = "1" self.root.findall(".axes")[0].append(axisElement) def _addAxisMapping(self, mappingsElement, mappingObject): mappingElement = ET.Element("mapping") if getattr(mappingObject, "description", None) is not None: mappingElement.attrib["description"] = mappingObject.description for what in ("inputLocation", "outputLocation"): whatObject = getattr(mappingObject, what, None) if whatObject is None: continue whatElement = ET.Element(what[:-8]) mappingElement.append(whatElement) for name, value in whatObject.items(): dimensionElement = ET.Element("dimension") dimensionElement.attrib["name"] = name dimensionElement.attrib["xvalue"] = self.intOrFloat(value) whatElement.append(dimensionElement) mappingsElement.append(mappingElement) def _addAxisLabel( self, axisElement: ET.Element, label: AxisLabelDescriptor ) -> None: labelElement = ET.Element("label") labelElement.attrib["uservalue"] = self.intOrFloat(label.userValue) if label.userMinimum is not None: labelElement.attrib["userminimum"] = self.intOrFloat(label.userMinimum) if label.userMaximum is not None: labelElement.attrib["usermaximum"] = self.intOrFloat(label.userMaximum) labelElement.attrib["name"] = label.name if label.elidable: labelElement.attrib["elidable"] = "true" if label.olderSibling: labelElement.attrib["oldersibling"] = "true" if label.linkedUserValue is not None: 
labelElement.attrib["linkeduservalue"] = self.intOrFloat( label.linkedUserValue ) self._addLabelNames(labelElement, label.labelNames) axisElement.append(labelElement) def _addLabelNames(self, parentElement, labelNames): for languageCode, labelName in sorted(labelNames.items()): languageElement = ET.Element("labelname") languageElement.attrib[XML_LANG] = languageCode languageElement.text = labelName parentElement.append(languageElement) def _addLocationLabel( self, parentElement: ET.Element, label: LocationLabelDescriptor ) -> None: labelElement = ET.Element("label") labelElement.attrib["name"] = label.name if label.elidable: labelElement.attrib["elidable"] = "true" if label.olderSibling: labelElement.attrib["oldersibling"] = "true" self._addLabelNames(labelElement, label.labelNames) self._addLocationElement(labelElement, userLocation=label.userLocation) parentElement.append(labelElement) def _addLocationElement( self, parentElement, *, designLocation: AnisotropicLocationDict = None, userLocation: SimpleLocationDict = None, ): locElement = ET.Element("location") for axis in self.documentObject.axes: if designLocation is not None and axis.name in designLocation: dimElement = ET.Element("dimension") dimElement.attrib["name"] = axis.name value = designLocation[axis.name] if isinstance(value, tuple): dimElement.attrib["xvalue"] = self.intOrFloat(value[0]) dimElement.attrib["yvalue"] = self.intOrFloat(value[1]) else: dimElement.attrib["xvalue"] = self.intOrFloat(value) locElement.append(dimElement) elif userLocation is not None and axis.name in userLocation: dimElement = ET.Element("dimension") dimElement.attrib["name"] = axis.name value = userLocation[axis.name] dimElement.attrib["uservalue"] = self.intOrFloat(value) locElement.append(dimElement) if len(locElement) > 0: parentElement.append(locElement) def _addInstance(self, instanceObject): instanceElement = ET.Element("instance") if instanceObject.name is not None: instanceElement.attrib["name"] = instanceObject.name if instanceObject.locationLabel is not None: instanceElement.attrib["location"] = instanceObject.locationLabel if instanceObject.familyName is not None: instanceElement.attrib["familyname"] = instanceObject.familyName if instanceObject.styleName is not None: instanceElement.attrib["stylename"] = instanceObject.styleName # add localisations if instanceObject.localisedStyleName: languageCodes = list(instanceObject.localisedStyleName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue # already stored in the element attribute localisedStyleNameElement = ET.Element("stylename") localisedStyleNameElement.attrib[XML_LANG] = code localisedStyleNameElement.text = instanceObject.getStyleName(code) instanceElement.append(localisedStyleNameElement) if instanceObject.localisedFamilyName: languageCodes = list(instanceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue # already stored in the element attribute localisedFamilyNameElement = ET.Element("familyname") localisedFamilyNameElement.attrib[XML_LANG] = code localisedFamilyNameElement.text = instanceObject.getFamilyName(code) instanceElement.append(localisedFamilyNameElement) if instanceObject.localisedStyleMapStyleName: languageCodes = list(instanceObject.localisedStyleMapStyleName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue localisedStyleMapStyleNameElement = ET.Element("stylemapstylename") localisedStyleMapStyleNameElement.attrib[XML_LANG] = code 
localisedStyleMapStyleNameElement.text = ( instanceObject.getStyleMapStyleName(code) ) instanceElement.append(localisedStyleMapStyleNameElement) if instanceObject.localisedStyleMapFamilyName: languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue localisedStyleMapFamilyNameElement = ET.Element("stylemapfamilyname") localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code localisedStyleMapFamilyNameElement.text = ( instanceObject.getStyleMapFamilyName(code) ) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple >= (5, 0): if instanceObject.locationLabel is None: self._addLocationElement( instanceElement, designLocation=instanceObject.designLocation, userLocation=instanceObject.userLocation, ) else: # Pre-version 5.0 code was validating and filling in the location # dict while writing it out, as preserved below. if instanceObject.location is not None: locationElement, instanceObject.location = self._makeLocationElement( instanceObject.location ) instanceElement.append(locationElement) if instanceObject.filename is not None: instanceElement.attrib["filename"] = instanceObject.filename if instanceObject.postScriptFontName is not None: instanceElement.attrib["postscriptfontname"] = ( instanceObject.postScriptFontName ) if instanceObject.styleMapFamilyName is not None: instanceElement.attrib["stylemapfamilyname"] = ( instanceObject.styleMapFamilyName ) if instanceObject.styleMapStyleName is not None: instanceElement.attrib["stylemapstylename"] = ( instanceObject.styleMapStyleName ) if self.effectiveFormatTuple < (5, 0): # Deprecated members as of version 5.0 if instanceObject.glyphs: if instanceElement.findall(".glyphs") == []: glyphsElement = ET.Element("glyphs") instanceElement.append(glyphsElement) glyphsElement = instanceElement.findall(".glyphs")[0] for glyphName, data in sorted(instanceObject.glyphs.items()): glyphElement = self._writeGlyphElement( instanceElement, instanceObject, glyphName, data ) glyphsElement.append(glyphElement) if instanceObject.kerning: kerningElement = ET.Element("kerning") instanceElement.append(kerningElement) if instanceObject.info: infoElement = ET.Element("info") instanceElement.append(infoElement) self._addLib(instanceElement, instanceObject.lib, 4) self.root.findall(".instances")[0].append(instanceElement) def _addSource(self, sourceObject): sourceElement = ET.Element("source") if sourceObject.filename is not None: sourceElement.attrib["filename"] = sourceObject.filename if sourceObject.name is not None: if sourceObject.name.find("temp_master") != 0: # do not save temporary source names sourceElement.attrib["name"] = sourceObject.name if sourceObject.familyName is not None: sourceElement.attrib["familyname"] = sourceObject.familyName if sourceObject.styleName is not None: sourceElement.attrib["stylename"] = sourceObject.styleName if sourceObject.layerName is not None: sourceElement.attrib["layer"] = sourceObject.layerName if sourceObject.localisedFamilyName: languageCodes = list(sourceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code == "en": continue # already stored in the element attribute localisedFamilyNameElement = ET.Element("familyname") localisedFamilyNameElement.attrib[XML_LANG] = code localisedFamilyNameElement.text = sourceObject.getFamilyName(code) sourceElement.append(localisedFamilyNameElement) if sourceObject.copyLib: libElement = ET.Element("lib") libElement.attrib["copy"] = 
"1" sourceElement.append(libElement) if sourceObject.copyGroups: groupsElement = ET.Element("groups") groupsElement.attrib["copy"] = "1" sourceElement.append(groupsElement) if sourceObject.copyFeatures: featuresElement = ET.Element("features") featuresElement.attrib["copy"] = "1" sourceElement.append(featuresElement) if sourceObject.copyInfo or sourceObject.muteInfo: infoElement = ET.Element("info") if sourceObject.copyInfo: infoElement.attrib["copy"] = "1" if sourceObject.muteInfo: infoElement.attrib["mute"] = "1" sourceElement.append(infoElement) if sourceObject.muteKerning: kerningElement = ET.Element("kerning") kerningElement.attrib["mute"] = "1" sourceElement.append(kerningElement) if sourceObject.mutedGlyphNames: for name in sourceObject.mutedGlyphNames: glyphElement = ET.Element("glyph") glyphElement.attrib["name"] = name glyphElement.attrib["mute"] = "1" sourceElement.append(glyphElement) if self.effectiveFormatTuple >= (5, 0): self._addLocationElement( sourceElement, designLocation=sourceObject.location ) else: # Pre-version 5.0 code was validating and filling in the location # dict while writing it out, as preserved below. locationElement, sourceObject.location = self._makeLocationElement( sourceObject.location ) sourceElement.append(locationElement) self.root.findall(".sources")[0].append(sourceElement) def _addVariableFont( self, parentElement: ET.Element, vf: VariableFontDescriptor ) -> None: vfElement = ET.Element("variable-font") vfElement.attrib["name"] = vf.name if vf.filename is not None: vfElement.attrib["filename"] = vf.filename if vf.axisSubsets: subsetsElement = ET.Element("axis-subsets") for subset in vf.axisSubsets: subsetElement = ET.Element("axis-subset") subsetElement.attrib["name"] = subset.name # Mypy doesn't support narrowing union types via hasattr() # https://mypy.readthedocs.io/en/stable/type_narrowing.html # TODO(Python 3.10): use TypeGuard if hasattr(subset, "userMinimum"): subset = cast(RangeAxisSubsetDescriptor, subset) if subset.userMinimum != -math.inf: subsetElement.attrib["userminimum"] = self.intOrFloat( subset.userMinimum ) if subset.userMaximum != math.inf: subsetElement.attrib["usermaximum"] = self.intOrFloat( subset.userMaximum ) if subset.userDefault is not None: subsetElement.attrib["userdefault"] = self.intOrFloat( subset.userDefault ) elif hasattr(subset, "userValue"): subset = cast(ValueAxisSubsetDescriptor, subset) subsetElement.attrib["uservalue"] = self.intOrFloat( subset.userValue ) subsetsElement.append(subsetElement) vfElement.append(subsetsElement) self._addLib(vfElement, vf.lib, 4) parentElement.append(vfElement) def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None: if not data: return libElement = ET.Element("lib") libElement.append(plistlib.totree(data, indent_level=indent_level)) parentElement.append(libElement) def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data): glyphElement = ET.Element("glyph") if data.get("mute"): glyphElement.attrib["mute"] = "1" if data.get("unicodes") is not None: glyphElement.attrib["unicode"] = " ".join( [hex(u) for u in data.get("unicodes")] ) if data.get("instanceLocation") is not None: locationElement, data["instanceLocation"] = self._makeLocationElement( data.get("instanceLocation") ) glyphElement.append(locationElement) if glyphName is not None: glyphElement.attrib["name"] = glyphName if data.get("note") is not None: noteElement = ET.Element("note") noteElement.text = data.get("note") glyphElement.append(noteElement) if data.get("masters") 
is not None: mastersElement = ET.Element("masters") for m in data.get("masters"): masterElement = ET.Element("master") if m.get("glyphName") is not None: masterElement.attrib["glyphname"] = m.get("glyphName") if m.get("font") is not None: masterElement.attrib["source"] = m.get("font") if m.get("location") is not None: locationElement, m["location"] = self._makeLocationElement( m.get("location") ) masterElement.append(locationElement) mastersElement.append(masterElement) glyphElement.append(mastersElement) return glyphElement class BaseDocReader(LogMixin): axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor axisMappingDescriptorClass = AxisMappingDescriptor locationLabelDescriptorClass = LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontsDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor def __init__(self, documentPath, documentObject): self.path = documentPath self.documentObject = documentObject tree = ET.parse(self.path) self.root = tree.getroot() self.documentObject.formatVersion = self.root.attrib.get("format", "3.0") self._axes = [] self.rules = [] self.sources = [] self.instances = [] self.axisDefaults = {} self._strictAxisNames = True @classmethod def fromstring(cls, string, documentObject): f = BytesIO(tobytes(string, encoding="utf-8")) self = cls(f, documentObject) self.path = None return self def read(self): self.readAxes() self.readLabels() self.readRules() self.readVariableFonts() self.readSources() self.readInstances() self.readLib() def readRules(self): # we also need to read any conditions that are outside of a condition set. rules = [] rulesElement = self.root.find(".rules") if rulesElement is not None: processingValue = rulesElement.attrib.get("processing", "first") if processingValue not in {"first", "last"}: raise DesignSpaceDocumentError( "<rules> processing attribute value is not valid: %r, " "expected 'first' or 'last'" % processingValue ) self.documentObject.rulesProcessingLast = processingValue == "last" for ruleElement in self.root.findall(".rules/rule"): ruleObject = self.ruleDescriptorClass() ruleName = ruleObject.name = ruleElement.attrib.get("name") # read any stray conditions outside a condition set externalConditions = self._readConditionElements( ruleElement, ruleName, ) if externalConditions: ruleObject.conditionSets.append(externalConditions) self.log.info( "Found stray rule conditions outside a conditionset. " "Wrapped them in a new conditionset." 
) # read the conditionsets for conditionSetElement in ruleElement.findall(".conditionset"): conditionSet = self._readConditionElements( conditionSetElement, ruleName, ) if conditionSet is not None: ruleObject.conditionSets.append(conditionSet) for subElement in ruleElement.findall(".sub"): a = subElement.attrib["name"] b = subElement.attrib["with"] ruleObject.subs.append((a, b)) rules.append(ruleObject) self.documentObject.rules = rules def _readConditionElements(self, parentElement, ruleName=None): cds = [] for conditionElement in parentElement.findall(".condition"): cd = {} cdMin = conditionElement.attrib.get("minimum") if cdMin is not None: cd["minimum"] = float(cdMin) else: # will allow these to be None, assume axis.minimum cd["minimum"] = None cdMax = conditionElement.attrib.get("maximum") if cdMax is not None: cd["maximum"] = float(cdMax) else: # will allow these to be None, assume axis.maximum cd["maximum"] = None cd["name"] = conditionElement.attrib.get("name") # # test for things if cd.get("minimum") is None and cd.get("maximum") is None: raise DesignSpaceDocumentError( "condition missing required minimum or maximum in rule" + (" '%s'" % ruleName if ruleName is not None else "") ) cds.append(cd) return cds def readAxes(self): # read the axes elements, including the warp map. axesElement = self.root.find(".axes") if axesElement is not None and "elidedfallbackname" in axesElement.attrib: self.documentObject.elidedFallbackName = axesElement.attrib[ "elidedfallbackname" ] axisElements = self.root.findall(".axes/axis") if not axisElements: return for axisElement in axisElements: if ( self.documentObject.formatTuple >= (5, 0) and "values" in axisElement.attrib ): axisObject = self.discreteAxisDescriptorClass() axisObject.values = [ float(s) for s in axisElement.attrib["values"].split(" ") ] else: axisObject = self.axisDescriptorClass() axisObject.minimum = float(axisElement.attrib.get("minimum")) axisObject.maximum = float(axisElement.attrib.get("maximum")) axisObject.default = float(axisElement.attrib.get("default")) axisObject.name = axisElement.attrib.get("name") if axisElement.attrib.get("hidden", False): axisObject.hidden = True axisObject.tag = axisElement.attrib.get("tag") for mapElement in axisElement.findall("map"): a = float(mapElement.attrib["input"]) b = float(mapElement.attrib["output"]) axisObject.map.append((a, b)) for labelNameElement in axisElement.findall("labelname"): # Note: elementtree reads the "xml:lang" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' for key, lang in labelNameElement.items(): if key == XML_LANG: axisObject.labelNames[lang] = tostr(labelNameElement.text) labelElement = axisElement.find(".labels") if labelElement is not None: if "ordering" in labelElement.attrib: axisObject.axisOrdering = int(labelElement.attrib["ordering"]) for label in labelElement.findall(".label"): axisObject.axisLabels.append(self.readAxisLabel(label)) self.documentObject.axes.append(axisObject) self.axisDefaults[axisObject.name] = axisObject.default self.documentObject.axisMappings = [] for mappingsElement in self.root.findall(".axes/mappings"): groupDescription = mappingsElement.attrib.get("description") for mappingElement in mappingsElement.findall("mapping"): description = mappingElement.attrib.get("description") inputElement = mappingElement.find("input") outputElement = mappingElement.find("output") inputLoc = {} outputLoc = {} for dimElement in inputElement.findall(".dimension"): name = dimElement.attrib["name"] value = 
float(dimElement.attrib["xvalue"]) inputLoc[name] = value for dimElement in outputElement.findall(".dimension"): name = dimElement.attrib["name"] value = float(dimElement.attrib["xvalue"]) outputLoc[name] = value axisMappingObject = self.axisMappingDescriptorClass( inputLocation=inputLoc, outputLocation=outputLoc, description=description, groupDescription=groupDescription, ) self.documentObject.axisMappings.append(axisMappingObject) def readAxisLabel(self, element: ET.Element): xml_attrs = { "userminimum", "uservalue", "usermaximum", "name", "elidable", "oldersibling", "linkeduservalue", } unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError( f"label element contains unknown attributes: {', '.join(unknown_attrs)}" ) name = element.get("name") if name is None: raise DesignSpaceDocumentError("label element must have a name attribute.") valueStr = element.get("uservalue") if valueStr is None: raise DesignSpaceDocumentError( "label element must have a uservalue attribute." ) value = float(valueStr) minimumStr = element.get("userminimum") minimum = float(minimumStr) if minimumStr is not None else None maximumStr = element.get("usermaximum") maximum = float(maximumStr) if maximumStr is not None else None linkedValueStr = element.get("linkeduservalue") linkedValue = float(linkedValueStr) if linkedValueStr is not None else None elidable = True if element.get("elidable") == "true" else False olderSibling = True if element.get("oldersibling") == "true" else False labelNames = { lang: label_name.text or "" for label_name in element.findall("labelname") for attr, lang in label_name.items() if attr == XML_LANG # Note: elementtree reads the "xml:lang" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' } return self.axisLabelDescriptorClass( name=name, userValue=value, userMinimum=minimum, userMaximum=maximum, elidable=elidable, olderSibling=olderSibling, linkedUserValue=linkedValue, labelNames=labelNames, ) def readLabels(self): if self.documentObject.formatTuple < (5, 0): return xml_attrs = {"name", "elidable", "oldersibling"} for labelElement in self.root.findall(".labels/label"): unknown_attrs = set(labelElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError( f"Label element contains unknown attributes: {', '.join(unknown_attrs)}" ) name = labelElement.get("name") if name is None: raise DesignSpaceDocumentError( "label element must have a name attribute." ) designLocation, userLocation = self.locationFromElement(labelElement) if designLocation: raise DesignSpaceDocumentError( f'<label> element "{name}" must only have user locations (using uservalue="").' 
) elidable = True if labelElement.get("elidable") == "true" else False olderSibling = True if labelElement.get("oldersibling") == "true" else False labelNames = { lang: label_name.text or "" for label_name in labelElement.findall("labelname") for attr, lang in label_name.items() if attr == XML_LANG # Note: elementtree reads the "xml:lang" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' } locationLabel = self.locationLabelDescriptorClass( name=name, userLocation=userLocation, elidable=elidable, olderSibling=olderSibling, labelNames=labelNames, ) self.documentObject.locationLabels.append(locationLabel) def readVariableFonts(self): if self.documentObject.formatTuple < (5, 0): return xml_attrs = {"name", "filename"} for variableFontElement in self.root.findall(".variable-fonts/variable-font"): unknown_attrs = set(variableFontElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError( f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}" ) name = variableFontElement.get("name") if name is None: raise DesignSpaceDocumentError( "variable-font element must have a name attribute." ) filename = variableFontElement.get("filename") axisSubsetsElement = variableFontElement.find(".axis-subsets") if axisSubsetsElement is None: raise DesignSpaceDocumentError( "variable-font element must contain an axis-subsets element." ) axisSubsets = [] for axisSubset in axisSubsetsElement.iterfind(".axis-subset"): axisSubsets.append(self.readAxisSubset(axisSubset)) lib = None libElement = variableFontElement.find(".lib") if libElement is not None: lib = plistlib.fromtree(libElement[0]) variableFont = self.variableFontsDescriptorClass( name=name, filename=filename, axisSubsets=axisSubsets, lib=lib, ) self.documentObject.variableFonts.append(variableFont) def readAxisSubset(self, element: ET.Element): if "uservalue" in element.attrib: xml_attrs = {"name", "uservalue"} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError( f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}" ) name = element.get("name") if name is None: raise DesignSpaceDocumentError( "axis-subset element must have a name attribute." ) userValueStr = element.get("uservalue") if userValueStr is None: raise DesignSpaceDocumentError( "The axis-subset element for a discrete subset must have a uservalue attribute." ) userValue = float(userValueStr) return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue) else: xml_attrs = {"name", "userminimum", "userdefault", "usermaximum"} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError( f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}" ) name = element.get("name") if name is None: raise DesignSpaceDocumentError( "axis-subset element must have a name attribute." ) userMinimum = element.get("userminimum") userDefault = element.get("userdefault") userMaximum = element.get("usermaximum") if ( userMinimum is not None and userDefault is not None and userMaximum is not None ): return self.rangeAxisSubsetDescriptorClass( name=name, userMinimum=float(userMinimum), userDefault=float(userDefault), userMaximum=float(userMaximum), ) if all(v is None for v in (userMinimum, userDefault, userMaximum)): return self.rangeAxisSubsetDescriptorClass(name=name) raise DesignSpaceDocumentError( "axis-subset element must have min/max/default values or none at all." 
) def readSources(self): for sourceCount, sourceElement in enumerate( self.root.findall(".sources/source") ): filename = sourceElement.attrib.get("filename") if filename is not None and self.path is not None: sourcePath = os.path.abspath( os.path.join(os.path.dirname(self.path), filename) ) else: sourcePath = None sourceName = sourceElement.attrib.get("name") if sourceName is None: # add a temporary source name sourceName = "temp_master.%d" % (sourceCount) sourceObject = self.sourceDescriptorClass() sourceObject.path = sourcePath # absolute path to the ufo source sourceObject.filename = filename # path as it is stored in the document sourceObject.name = sourceName familyName = sourceElement.attrib.get("familyname") if familyName is not None: sourceObject.familyName = familyName styleName = sourceElement.attrib.get("stylename") if styleName is not None: sourceObject.styleName = styleName for familyNameElement in sourceElement.findall("familyname"): for key, lang in familyNameElement.items(): if key == XML_LANG: familyName = familyNameElement.text sourceObject.setFamilyName(familyName, lang) designLocation, userLocation = self.locationFromElement(sourceElement) if userLocation: raise DesignSpaceDocumentError( f'<source> element "{sourceName}" must only have design locations (using xvalue="").' ) sourceObject.location = designLocation layerName = sourceElement.attrib.get("layer") if layerName is not None: sourceObject.layerName = layerName for libElement in sourceElement.findall(".lib"): if libElement.attrib.get("copy") == "1": sourceObject.copyLib = True for groupsElement in sourceElement.findall(".groups"): if groupsElement.attrib.get("copy") == "1": sourceObject.copyGroups = True for infoElement in sourceElement.findall(".info"): if infoElement.attrib.get("copy") == "1": sourceObject.copyInfo = True if infoElement.attrib.get("mute") == "1": sourceObject.muteInfo = True for featuresElement in sourceElement.findall(".features"): if featuresElement.attrib.get("copy") == "1": sourceObject.copyFeatures = True for glyphElement in sourceElement.findall(".glyph"): glyphName = glyphElement.attrib.get("name") if glyphName is None: continue if glyphElement.attrib.get("mute") == "1": sourceObject.mutedGlyphNames.append(glyphName) for kerningElement in sourceElement.findall(".kerning"): if kerningElement.attrib.get("mute") == "1": sourceObject.muteKerning = True self.documentObject.sources.append(sourceObject) def locationFromElement(self, element): """Read a nested ``<location>`` element inside the given ``element``. .. versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) """ elementLocation = (None, None) for locationElement in element.findall(".location"): elementLocation = self.readLocationElement(locationElement) break return elementLocation def readLocationElement(self, locationElement): """Read a ``<location>`` element. .. 
versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) """ if self._strictAxisNames and not self.documentObject.axes: raise DesignSpaceDocumentError("No axes defined") userLoc = {} designLoc = {} for dimensionElement in locationElement.findall(".dimension"): dimName = dimensionElement.attrib.get("name") if self._strictAxisNames and dimName not in self.axisDefaults: # Warn about and skip any dimension that refers to an axis not defined in the document. self.log.warning('Location with undefined axis: "%s".', dimName) continue userValue = xValue = yValue = None try: userValue = dimensionElement.attrib.get("uservalue") if userValue is not None: userValue = float(userValue) except ValueError: self.log.warning( "ValueError in readLocation userValue %s", userValue ) try: xValue = dimensionElement.attrib.get("xvalue") if xValue is not None: xValue = float(xValue) except ValueError: self.log.warning("ValueError in readLocation xValue %s", xValue) try: yValue = dimensionElement.attrib.get("yvalue") if yValue is not None: yValue = float(yValue) except ValueError: self.log.warning("ValueError in readLocation yValue %s", yValue) if (userValue is None) == (xValue is None): raise DesignSpaceDocumentError( f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"' ) if yValue is not None: if xValue is None: raise DesignSpaceDocumentError( f'Missing xvalue="" for the location dimension "{dimName}" with yvalue="{yValue}"' ) designLoc[dimName] = (xValue, yValue) elif xValue is not None: designLoc[dimName] = xValue else: userLoc[dimName] = userValue return designLoc, userLoc def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True): instanceElements = self.root.findall(".instances/instance") for instanceElement in instanceElements: self._readSingleInstanceElement( instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo, ) def _readSingleInstanceElement( self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True ): filename = instanceElement.attrib.get("filename") if filename is not None and self.documentObject.path is not None: instancePath = os.path.join( os.path.dirname(self.documentObject.path), filename ) else: instancePath = None instanceObject = self.instanceDescriptorClass() instanceObject.path = instancePath # absolute path to the instance instanceObject.filename = filename # path as it is stored in the document name = instanceElement.attrib.get("name") if name is not None: instanceObject.name = name familyname = instanceElement.attrib.get("familyname") if familyname is not None: instanceObject.familyName = familyname stylename = instanceElement.attrib.get("stylename") if stylename is not None: instanceObject.styleName = stylename postScriptFontName = instanceElement.attrib.get("postscriptfontname") if postScriptFontName is not None: instanceObject.postScriptFontName = postScriptFontName styleMapFamilyName = instanceElement.attrib.get("stylemapfamilyname") if styleMapFamilyName is not None: instanceObject.styleMapFamilyName = styleMapFamilyName styleMapStyleName = instanceElement.attrib.get("stylemapstylename") if styleMapStyleName is not None: instanceObject.styleMapStyleName = styleMapStyleName # read localised names for styleNameElement in instanceElement.findall("stylename"): for key, lang in styleNameElement.items(): if key == XML_LANG: styleName = styleNameElement.text instanceObject.setStyleName(styleName, lang) for familyNameElement in instanceElement.findall("familyname"): for key, lang in
familyNameElement.items(): if key == XML_LANG: familyName = familyNameElement.text instanceObject.setFamilyName(familyName, lang) for styleMapStyleNameElement in instanceElement.findall("stylemapstylename"): for key, lang in styleMapStyleNameElement.items(): if key == XML_LANG: styleMapStyleName = styleMapStyleNameElement.text instanceObject.setStyleMapStyleName(styleMapStyleName, lang) for styleMapFamilyNameElement in instanceElement.findall("stylemapfamilyname"): for key, lang in styleMapFamilyNameElement.items(): if key == XML_LANG: styleMapFamilyName = styleMapFamilyNameElement.text instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang) designLocation, userLocation = self.locationFromElement(instanceElement) locationLabel = instanceElement.attrib.get("location") if (designLocation or userLocation) and locationLabel is not None: raise DesignSpaceDocumentError( 'instance element must have at most one of the location="..." attribute or the nested location element' ) instanceObject.locationLabel = locationLabel instanceObject.userLocation = userLocation or {} instanceObject.designLocation = designLocation or {} for glyphElement in instanceElement.findall(".glyphs/glyph"): self.readGlyphElement(glyphElement, instanceObject) for infoElement in instanceElement.findall("info"): self.readInfoElement(infoElement, instanceObject) for libElement in instanceElement.findall("lib"): self.readLibElement(libElement, instanceObject) self.documentObject.instances.append(instanceObject) def readLibElement(self, libElement, instanceObject): """Read the lib element for the given instance.""" instanceObject.lib = plistlib.fromtree(libElement[0]) def readInfoElement(self, infoElement, instanceObject): """Read the info element.""" instanceObject.info = True def readGlyphElement(self, glyphElement, instanceObject): """ Read the glyph element, which could look like either one of these: .. code-block:: xml <glyph name="b" unicode="0x62"/> <glyph name="b"/> <glyph name="b"> <master location="location-token-bbb" source="master-token-aaa2"/> <master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/> <note> This is an instance from an anisotropic interpolation. </note> </glyph> """ glyphData = {} glyphName = glyphElement.attrib.get("name") if glyphName is None: raise DesignSpaceDocumentError("Glyph object without name attribute") mute = glyphElement.attrib.get("mute") if mute == "1": glyphData["mute"] = True # unicode unicodes = glyphElement.attrib.get("unicode") if unicodes is not None: try: unicodes = [int(u, 16) for u in unicodes.split(" ")] glyphData["unicodes"] = unicodes except ValueError: raise DesignSpaceDocumentError( "unicode values %s are not integers" % unicodes ) for noteElement in glyphElement.findall(".note"): glyphData["note"] = noteElement.text break designLocation, userLocation = self.locationFromElement(glyphElement) if userLocation: raise DesignSpaceDocumentError( f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").' ) if designLocation is not None: glyphData["instanceLocation"] = designLocation glyphSources = None for masterElement in glyphElement.findall(".masters/master"): fontSourceName = masterElement.attrib.get("source") designLocation, userLocation = self.locationFromElement(masterElement) if userLocation: raise DesignSpaceDocumentError( f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").' 
) masterGlyphName = masterElement.attrib.get("glyphname") if masterGlyphName is None: # if we don't read a glyphname, use the one we have masterGlyphName = glyphName d = dict( font=fontSourceName, location=designLocation, glyphName=masterGlyphName ) if glyphSources is None: glyphSources = [] glyphSources.append(d) if glyphSources is not None: glyphData["masters"] = glyphSources instanceObject.glyphs[glyphName] = glyphData def readLib(self): """Read the lib element for the whole document.""" for libElement in self.root.findall(".lib"): self.documentObject.lib = plistlib.fromtree(libElement[0]) class DesignSpaceDocument(LogMixin, AsDictMixin): """The DesignSpaceDocument object can read and write ``.designspace`` data. It imports the axes, sources, variable fonts and instances to very basic **descriptor** objects that store the data in attributes. Data is added to the document by creating such descriptor objects, filling them with data and then adding them to the document. This makes it easy to integrate this object in different contexts. The **DesignSpaceDocument** object can be subclassed to work with different objects, as long as they have the same attributes. Reader and Writer objects can be subclassed as well. **Note:** Python attribute names are usually camelCased, the corresponding `XML <document-xml-structure>`_ attributes are usually all lowercase. .. code:: python from fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace") doc.formatVersion doc.elidedFallbackName doc.axes doc.axisMappings doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources doc.variableFonts doc.instances doc.lib """ def __init__(self, readerClass=None, writerClass=None): self.path = None """String, optional. When the document is read from the disk, this is the full path that was given to :meth:`read` or :meth:`fromfile`. """ self.filename = None """String, optional. When the document is read from the disk, this is its original file name, i.e. the last part of its path. When the document is produced by a Python script and still only exists in memory, the producing script can write here an indication of a possible "good" filename, in case one wants to save the file somewhere. """ self.formatVersion: Optional[str] = None """Format version for this document, as a string. E.g. "4.0" """ self.elidedFallbackName: Optional[str] = None """STAT Style Attributes Header field ``elidedFallbackNameID``. See: `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_ .. versionadded:: 5.0 """ self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = [] """List of this document's axes.""" self.axisMappings: List[AxisMappingDescriptor] = [] """List of this document's axis mappings.""" self.locationLabels: List[LocationLabelDescriptor] = [] """List of this document's STAT format 4 labels. .. versionadded:: 5.0""" self.rules: List[RuleDescriptor] = [] """List of this document's rules.""" self.rulesProcessingLast: bool = False """This flag indicates whether the substitution rules should be applied before or after other glyph substitution features. - False: before - True: after. Default is False. For new projects, you probably want True. 
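For example, a document that wants its rules applied after other substitutions sets the ``processing`` attribute on its ``<rules>`` element (a minimal sketch, with hypothetical axis and glyph names):

.. code:: xml

    <rules processing="last">
        <rule name="named.rule.1">
            <conditionset>
                <condition name="Weight" minimum="200" maximum="750"/>
            </conditionset>
            <sub name="dollar" with="dollar.nostroke"/>
        </rule>
    </rules>
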
See the following issues for more information: `fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__ `fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__ If you want to use a different feature altogether, e.g. ``calt``, use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag`` .. code:: xml <lib> <dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib> """ self.sources: List[SourceDescriptor] = [] """List of this document's sources.""" self.variableFonts: List[VariableFontDescriptor] = [] """List of this document's variable fonts. .. versionadded:: 5.0""" self.instances: List[InstanceDescriptor] = [] """List of this document's instances.""" self.lib: Dict = {} """User defined, custom data associated with the whole document. Use reverse-DNS notation to identify your own data. Respect the data stored by others. """ self.default: Optional[str] = None """Name of the default master. This attribute is updated by the :meth:`findDefault` method. """ if readerClass is not None: self.readerClass = readerClass else: self.readerClass = BaseDocReader if writerClass is not None: self.writerClass = writerClass else: self.writerClass = BaseDocWriter @classmethod def fromfile(cls, path, readerClass=None, writerClass=None): """Read a designspace file from ``path`` and return a new instance of :class:`DesignSpaceDocument`. """ self = cls(readerClass=readerClass, writerClass=writerClass) self.read(path) return self @classmethod def fromstring(cls, string, readerClass=None, writerClass=None): self = cls(readerClass=readerClass, writerClass=writerClass) reader = self.readerClass.fromstring(string, self) reader.read() if self.sources: self.findDefault() return self def tostring(self, encoding=None): """Returns the designspace as a string. Default encoding ``utf-8``.""" if encoding is str or (encoding is not None and encoding.lower() == "unicode"): f = StringIO() xml_declaration = False elif encoding is None or encoding == "utf-8": f = BytesIO() encoding = "UTF-8" xml_declaration = True else: raise ValueError("unsupported encoding: '%s'" % encoding) writer = self.writerClass(f, self) writer.write(encoding=encoding, xml_declaration=xml_declaration) return f.getvalue() def read(self, path): """Read a designspace file from ``path`` and populate the fields of ``self`` with the data. """ if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path) reader = self.readerClass(path, self) reader.read() if self.sources: self.findDefault() def write(self, path): """Write this designspace to ``path``.""" if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path) self.updatePaths() writer = self.writerClass(path, self) writer.write() def _posixRelativePath(self, otherPath): relative = os.path.relpath(otherPath, os.path.dirname(self.path)) return posix(relative) def updatePaths(self): """ Right before we save we need to identify and respond to the following situations: In each descriptor, we have to do the right thing for the filename attribute. :: case 1. descriptor.filename == None descriptor.path == None -- action: write as is, descriptors will not have a filename attr. useless, but no reason to interfere. case 2. descriptor.filename == "../something" descriptor.path == None -- action: write as is. The filename attr should not be touched. case 3.
descriptor.filename == None descriptor.path == "~/absolute/path/there" -- action: calculate the relative path for filename. We're not overwriting some other value for filename, it should be fine case 4. descriptor.filename == '../somewhere' descriptor.path == "~/absolute/path/there" -- action: there is a conflict between the given filename, and the path. So we know where the file is relative to the document. Can't guess why they're different, we just choose for path to be correct and update filename. """ assert self.path is not None for descriptor in self.sources + self.instances: if descriptor.path is not None: # case 3 and 4: filename gets updated and relativized descriptor.filename = self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor: SourceDescriptor): """Add the given ``sourceDescriptor`` to ``doc.sources``.""" self.sources.append(sourceDescriptor) def addSourceDescriptor(self, **kwargs): """Instantiate a new :class:`SourceDescriptor` using the given ``kwargs`` and add it to ``doc.sources``. """ source = self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source) return source def addInstance(self, instanceDescriptor: InstanceDescriptor): """Add the given ``instanceDescriptor`` to :attr:`instances`.""" self.instances.append(instanceDescriptor) def addInstanceDescriptor(self, **kwargs): """Instantiate a new :class:`InstanceDescriptor` using the given ``kwargs`` and add it to :attr:`instances`. """ instance = self.writerClass.instanceDescriptorClass(**kwargs) self.addInstance(instance) return instance def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): """Add the given ``axisDescriptor`` to :attr:`axes`.""" self.axes.append(axisDescriptor) def addAxisDescriptor(self, **kwargs): """Instantiate a new :class:`AxisDescriptor` using the given ``kwargs`` and add it to :attr:`axes`. The axis will be an instance of :class:`DiscreteAxisDescriptor` if the ``kwargs`` provide a ``values``, or an :class:`AxisDescriptor` otherwise. """ if "values" in kwargs: axis = self.writerClass.discreteAxisDescriptorClass(**kwargs) else: axis = self.writerClass.axisDescriptorClass(**kwargs) self.addAxis(axis) return axis def addAxisMapping(self, axisMappingDescriptor: AxisMappingDescriptor): """Add the given ``axisMappingDescriptor`` to :attr:`axisMappings`.""" self.axisMappings.append(axisMappingDescriptor) def addAxisMappingDescriptor(self, **kwargs): """Instantiate a new :class:`AxisMappingDescriptor` using the given ``kwargs`` and add it to :attr:`axisMappings`. """ axisMapping = self.writerClass.axisMappingDescriptorClass(**kwargs) self.addAxisMapping(axisMapping) return axisMapping def addRule(self, ruleDescriptor: RuleDescriptor): """Add the given ``ruleDescriptor`` to :attr:`rules`.""" self.rules.append(ruleDescriptor) def addRuleDescriptor(self, **kwargs): """Instantiate a new :class:`RuleDescriptor` using the given ``kwargs`` and add it to :attr:`rules`. """ rule = self.writerClass.ruleDescriptorClass(**kwargs) self.addRule(rule) return rule def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor): """Add the given ``variableFontDescriptor`` to :attr:`variableFonts`. .. versionadded:: 5.0 """ self.variableFonts.append(variableFontDescriptor) def addVariableFontDescriptor(self, **kwargs): """Instantiate a new :class:`VariableFontDescriptor` using the given ``kwargs`` and add it to :attr:`variableFonts`. ..
versionadded:: 5.0 """ variableFont = self.writerClass.variableFontDescriptorClass(**kwargs) self.addVariableFont(variableFont) return variableFont def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor): """Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`. .. versionadded:: 5.0 """ self.locationLabels.append(locationLabelDescriptor) def addLocationLabelDescriptor(self, **kwargs): """Instantiate a new :class:`LocationLabelDescriptor` using the given ``kwargs`` and add it to :attr:`locationLabels`. .. versionadded:: 5.0 """ locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs) self.addLocationLabel(locationLabel) return locationLabel def newDefaultLocation(self): """Return a dict with the default location in design space coordinates.""" # Without OrderedDict, output XML would be non-deterministic. # https://github.com/LettError/designSpaceDocument/issues/10 loc = collections.OrderedDict() for axisDescriptor in self.axes: loc[axisDescriptor.name] = axisDescriptor.map_forward( axisDescriptor.default ) return loc def labelForUserLocation( self, userLocation: SimpleLocationDict ) -> Optional[LocationLabelDescriptor]: """Return the :class:`LocationLabel` that matches the given ``userLocation``, or ``None`` if no such label exists. .. versionadded:: 5.0 """ return next( ( label for label in self.locationLabels if label.userLocation == userLocation ), None, ) def updateFilenameFromPath(self, masters=True, instances=True, force=False): """Set a descriptor filename attr from the path and this document path. If the filename attribute is not None: skip it. """ if masters: for descriptor in self.sources: if descriptor.filename is not None and not force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) if instances: for descriptor in self.instances: if descriptor.filename is not None and not force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) def newAxisDescriptor(self): """Ask the writer class to make us a new axisDescriptor.""" return self.writerClass.getAxisDecriptor() def newSourceDescriptor(self): """Ask the writer class to make us a new sourceDescriptor.""" return self.writerClass.getSourceDescriptor() def newInstanceDescriptor(self): """Ask the writer class to make us a new instanceDescriptor.""" return self.writerClass.getInstanceDescriptor() def getAxisOrder(self): """Return a list of axis names, in the same order as defined in the document.""" names = [] for axisDescriptor in self.axes: names.append(axisDescriptor.name) return names def getAxis(self, name: str) -> AxisDescriptor | DiscreteAxisDescriptor | None: """Return the axis with the given ``name``, or ``None`` if no such axis exists.""" return next((axis for axis in self.axes if axis.name == name), None) def getAxisByTag(self, tag: str) -> AxisDescriptor | DiscreteAxisDescriptor | None: """Return the axis with the given ``tag``, or ``None`` if no such axis exists.""" return next((axis for axis in self.axes if axis.tag == tag), None) def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]: """Return the top-level location label with the given ``name``, or ``None`` if no such label exists. .. versionadded:: 5.0 """ for label in self.locationLabels: if label.name == name: return label return None def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict: """Map a user location to a design location. 
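For example (a minimal sketch, assuming a single ``weight`` axis whose ``map`` sends user value 400 to design value 80):

.. code:: python

    doc.map_forward({"weight": 400})  # {"weight": 80.0}
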
Assume that missing coordinates are at the default location for that axis. Note: the output won't be anisotropic, only the xvalue is set. .. versionadded:: 5.0 """ return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for axis in self.axes } def map_backward( self, designLocation: AnisotropicLocationDict ) -> SimpleLocationDict: """Map a design location to a user location. Assume that missing coordinates are at the default location for that axis. When the input has anisotropic locations, only the xvalue is used. .. versionadded:: 5.0 """ return { axis.name: ( axis.map_backward(designLocation[axis.name]) if axis.name in designLocation else axis.default ) for axis in self.axes } def findDefault(self): """Set and return SourceDescriptor at the default location or None. The default location is the set of all `default` values in user space of all axes. This function updates the document's :attr:`default` value. .. versionchanged:: 5.0 Allow the default source to not specify some of the axis values, and they are assumed to be the default. See :meth:`SourceDescriptor.getFullDesignLocation()` """ self.default = None # Convert the default location from user space to design space before comparing # it against the SourceDescriptor locations (always in design space). defaultDesignLocation = self.newDefaultLocation() for sourceDescriptor in self.sources: if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation: self.default = sourceDescriptor return sourceDescriptor return None def normalizeLocation(self, location): """Return a dict with normalized axis values.""" from fontTools.varLib.models import normalizeValue new = {} for axis in self.axes: if axis.name not in location: # skipping this dimension it seems continue value = location[axis.name] # 'anisotropic' location, take first coord only if isinstance(value, tuple): value = value[0] triple = [ axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum) ] new[axis.name] = normalizeValue(value, triple) return new def normalize(self): """ Normalise the geometry of this designspace: - scale all the locations of all masters and instances to the -1 - 0 - 1 value. - we need the axis data to do the scaling, so we do those last. 
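For example (a minimal sketch, assuming a single ``weight`` axis spanning 100 to 900 with default 400 and no ``map``):

.. code:: python

    doc.normalize()
    # a master at {"weight": 900} now sits at {"weight": 1.0},
    # the default at {"weight": 0} and weight 100 at {"weight": -1.0}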
""" # masters for item in self.sources: item.location = self.normalizeLocation(item.location) # instances for item in self.instances: # glyph masters for this instance for _, glyphData in item.glyphs.items(): glyphData["instanceLocation"] = self.normalizeLocation( glyphData["instanceLocation"] ) for glyphMaster in glyphData["masters"]: glyphMaster["location"] = self.normalizeLocation( glyphMaster["location"] ) item.location = self.normalizeLocation(item.location) # the axes for axis in self.axes: # scale the map first newMap = [] for inputValue, outputValue in axis.map: newOutputValue = self.normalizeLocation({axis.name: outputValue}).get( axis.name ) newMap.append((inputValue, newOutputValue)) if newMap: axis.map = newMap # finally the axis values minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name) default = self.normalizeLocation({axis.name: axis.default}).get(axis.name) # and set them in the axis.minimum axis.minimum = minimum axis.maximum = maximum axis.default = default # now the rules for rule in self.rules: newConditionSets = [] for conditions in rule.conditionSets: newConditions = [] for cond in conditions: if cond.get("minimum") is not None: minimum = self.normalizeLocation( {cond["name"]: cond["minimum"]} ).get(cond["name"]) else: minimum = None if cond.get("maximum") is not None: maximum = self.normalizeLocation( {cond["name"]: cond["maximum"]} ).get(cond["name"]) else: maximum = None newConditions.append( dict(name=cond["name"], minimum=minimum, maximum=maximum) ) newConditionSets.append(newConditions) rule.conditionSets = newConditionSets def loadSourceFonts(self, opener, **kwargs): """Ensure SourceDescriptor.font attributes are loaded, and return list of fonts. Takes a callable which initializes a new font object (e.g. TTFont, or defcon.Font, etc.) from the SourceDescriptor.path, and sets the SourceDescriptor.font attribute. If the font attribute is already not None, it is not loaded again. Fonts with the same path are only loaded once and shared among SourceDescriptors. For example, to load UFO sources using defcon: designspace = DesignSpaceDocument.fromfile("path/to/my.designspace") designspace.loadSourceFonts(defcon.Font) Or to load masters as FontTools binary fonts, including extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args: opener (Callable): takes one required positional argument, the source.path, and an optional list of keyword arguments, and returns a new font object loaded from the path. **kwargs: extra options passed on to the opener function. Returns: List of font objects in the order they appear in the sources list. """ # we load fonts with the same source.path only once loaded = {} fonts = [] for source in self.sources: if source.font is not None: # font already loaded fonts.append(source.font) continue if source.path in loaded: source.font = loaded[source.path] else: if source.path is None: raise DesignSpaceDocumentError( "Designspace source '%s' has no 'path' attribute" % (source.name or "<Unknown>") ) source.font = opener(source.path, **kwargs) loaded[source.path] = source.font fonts.append(source.font) return fonts @property def formatTuple(self): """Return the formatVersion as a tuple of (major, minor). .. 
versionadded:: 5.0 """ if self.formatVersion is None: return (5, 0) numbers = (int(i) for i in self.formatVersion.split(".")) major = next(numbers) minor = next(numbers, 0) return (major, minor) def getVariableFonts(self) -> List[VariableFontDescriptor]: """Return all variable fonts defined in this document, or implicit variable fonts that can be built from the document's continuous axes. In the case of Designspace documents before version 5, the whole document was implicitly describing a variable font that covers the whole space. In version 5 and above documents, there can be as many variable fonts as there are locations on discrete axes. .. seealso:: :func:`splitInterpolable` .. versionadded:: 5.0 """ if self.variableFonts: return self.variableFonts variableFonts = [] discreteAxes = [] rangeAxisSubsets: List[ Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor] ] = [] for axis in self.axes: if hasattr(axis, "values"): # Mypy doesn't support narrowing union types via hasattr() # TODO(Python 3.10): use TypeGuard # https://mypy.readthedocs.io/en/stable/type_narrowing.html axis = cast(DiscreteAxisDescriptor, axis) discreteAxes.append(axis) # type: ignore else: rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name)) valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) for values in valueCombinations: basename = None if self.filename is not None: basename = os.path.splitext(self.filename)[0] + "-VF" if self.path is not None: basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF" if basename is None: basename = "VF" axisNames = "".join( [f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)] ) variableFonts.append( VariableFontDescriptor( name=f"{basename}{axisNames}", axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value) for axis, value in zip(discreteAxes, values) ], ) ) return variableFonts def deepcopyExceptFonts(self): """Allow deep-copying a DesignSpace document without deep-copying attached UFO fonts or TTFont objects. The :attr:`font` attribute is shared by reference between the original and the copy. .. versionadded:: 5.0 """ fonts = [source.font for source in self.sources] try: for source in self.sources: source.font = None res = copy.deepcopy(self) for source, font in zip(res.sources, fonts): source.font = font return res finally: for source, font in zip(self.sources, fonts): source.font = font def main(args=None): """Roundtrip .designspace file through the DesignSpaceDocument class""" if args is None: import sys args = sys.argv[1:] from argparse import ArgumentParser parser = ArgumentParser(prog="designspaceLib", description=main.__doc__) parser.add_argument("input") parser.add_argument("output") options = parser.parse_args(args) ds = DesignSpaceDocument.fromfile(options.input) ds.write(options.output)

# fontTools/designspaceLib/__main__.py

import sys from fontTools.designspaceLib import main if __name__ == "__main__": sys.exit(main())

# fontTools/designspaceLib/split.py

"""Allows building all the variable fonts of a DesignSpace version 5 by splitting the document into interpolable sub-spaces, then into each VF.
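For example (a minimal sketch; ``my5.designspace`` is a hypothetical input path):

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument
    from fontTools.designspaceLib.split import splitVariableFonts

    doc = DesignSpaceDocument.fromfile("my5.designspace")
    for name, vfDoc in splitVariableFonts(doc):
        vfDoc.write(f"{name}.designspace")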
""" from __future__ import annotations import itertools import logging import math from typing import Any, Callable, Dict, Iterator, List, Tuple, cast from fontTools.designspaceLib import ( AxisDescriptor, AxisMappingDescriptor, DesignSpaceDocument, DiscreteAxisDescriptor, InstanceDescriptor, RuleDescriptor, SimpleLocationDict, SourceDescriptor, VariableFontDescriptor, ) from fontTools.designspaceLib.statNames import StatNames, getStatNames from fontTools.designspaceLib.types import ( ConditionSet, Range, Region, getVFUserRegion, locationInRegion, regionInRegion, userRegionToDesignRegion, ) LOGGER = logging.getLogger(__name__) MakeInstanceFilenameCallable = Callable[ [DesignSpaceDocument, InstanceDescriptor, StatNames], str ] def defaultMakeInstanceFilename( doc: DesignSpaceDocument, instance: InstanceDescriptor, statNames: StatNames ) -> str: """Default callable to synthesize an instance filename when makeNames=True, for instances that don't specify an instance name in the designspace. This part of the name generation can be overriden because it's not specified by the STAT table. """ familyName = instance.familyName or statNames.familyNames.get("en") styleName = instance.styleName or statNames.styleNames.get("en") return f"{familyName}-{styleName}.ttf" def splitInterpolable( doc: DesignSpaceDocument, makeNames: bool = True, expandLocations: bool = True, makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename, ) -> Iterator[Tuple[SimpleLocationDict, DesignSpaceDocument]]: """Split the given DS5 into several interpolable sub-designspaces. There are as many interpolable sub-spaces as there are combinations of discrete axis values. E.g. with axes: - italic (discrete) Upright or Italic - style (discrete) Sans or Serif - weight (continuous) 100 to 900 There are 4 sub-spaces in which the Weight axis should interpolate: (Upright, Sans), (Upright, Serif), (Italic, Sans) and (Italic, Serif). The sub-designspaces still include the full axis definitions and STAT data, but the rules, sources, variable fonts, instances are trimmed down to only keep what falls within the interpolable sub-space. Args: - ``makeNames``: Whether to compute the instance family and style names using the STAT data. - ``expandLocations``: Whether to turn all locations into "full" locations, including implicit default axis values where missing. - ``makeInstanceFilename``: Callable to synthesize an instance filename when makeNames=True, for instances that don't specify an instance name in the designspace. This part of the name generation can be overridden because it's not specified by the STAT table. .. 
versionadded:: 5.0 """ discreteAxes = [] interpolableUserRegion: Region = {} for axis in doc.axes: if hasattr(axis, "values"): # Mypy doesn't support narrowing union types via hasattr() # TODO(Python 3.10): use TypeGuard # https://mypy.readthedocs.io/en/stable/type_narrowing.html axis = cast(DiscreteAxisDescriptor, axis) discreteAxes.append(axis) else: axis = cast(AxisDescriptor, axis) interpolableUserRegion[axis.name] = Range( axis.minimum, axis.maximum, axis.default, ) valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) for values in valueCombinations: discreteUserLocation = { discreteAxis.name: value for discreteAxis, value in zip(discreteAxes, values) } subDoc = _extractSubSpace( doc, {**interpolableUserRegion, **discreteUserLocation}, keepVFs=True, makeNames=makeNames, expandLocations=expandLocations, makeInstanceFilename=makeInstanceFilename, ) yield discreteUserLocation, subDoc def splitVariableFonts( doc: DesignSpaceDocument, makeNames: bool = False, expandLocations: bool = False, makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename, ) -> Iterator[Tuple[str, DesignSpaceDocument]]: """Convert each variable font listed in this document into a standalone designspace. This can be used to compile all the variable fonts from a format 5 designspace using tools that can only deal with 1 VF at a time. Args: - ``makeNames``: Whether to compute the instance family and style names using the STAT data. - ``expandLocations``: Whether to turn all locations into "full" locations, including implicit default axis values where missing. - ``makeInstanceFilename``: Callable to synthesize an instance filename when makeNames=True, for instances that don't specify an instance name in the designspace. This part of the name generation can be overridden because it's not specified by the STAT table. .. versionadded:: 5.0 """ # Make one DesignspaceDoc v5 for each variable font for vf in doc.getVariableFonts(): vfUserRegion = getVFUserRegion(doc, vf) vfDoc = _extractSubSpace( doc, vfUserRegion, keepVFs=False, makeNames=makeNames, expandLocations=expandLocations, makeInstanceFilename=makeInstanceFilename, ) vfDoc.lib = {**vfDoc.lib, **vf.lib} yield vf.name, vfDoc def convert5to4( doc: DesignSpaceDocument, ) -> Dict[str, DesignSpaceDocument]: """Convert each variable font listed in this document into a standalone format 4 designspace. This can be used to compile all the variable fonts from a format 5 designspace using tools that only know about format 4. .. versionadded:: 5.0 """ vfs = {} for _location, subDoc in splitInterpolable(doc): for vfName, vfDoc in splitVariableFonts(subDoc): vfDoc.formatVersion = "4.1" vfs[vfName] = vfDoc return vfs def _extractSubSpace( doc: DesignSpaceDocument, userRegion: Region, *, keepVFs: bool, makeNames: bool, expandLocations: bool, makeInstanceFilename: MakeInstanceFilenameCallable, ) -> DesignSpaceDocument: subDoc = DesignSpaceDocument() # Don't include STAT info # FIXME: (Jany) let's think about it. Not include = OK because the point of # the splitting is to build VFs and we'll use the STAT data of the full # document to generate the STAT of the VFs, so "no need" to have STAT data # in sub-docs. Counterpoint: what if someone wants to split this DS for # other purposes? Maybe for that it would be useful to also subset the STAT # data? 
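# Usage sketch for the functions above (hypothetical paths): convert5to4()
# combines splitInterpolable() and splitVariableFonts() to yield one format 4
# document per variable font, for tools that predate format 5:
#
#     doc = DesignSpaceDocument.fromfile("my5.designspace")
#     for name, vfDoc in convert5to4(doc).items():
#         vfDoc.write(f"{name}.designspace")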
# subDoc.elidedFallbackName = doc.elidedFallbackName def maybeExpandDesignLocation(object): if expandLocations: return object.getFullDesignLocation(doc) else: return object.designLocation for axis in doc.axes: range = userRegion[axis.name] if isinstance(range, Range) and hasattr(axis, "minimum"): # Mypy doesn't support narrowing union types via hasattr() # TODO(Python 3.10): use TypeGuard # https://mypy.readthedocs.io/en/stable/type_narrowing.html axis = cast(AxisDescriptor, axis) subDoc.addAxis( AxisDescriptor( # Same info tag=axis.tag, name=axis.name, labelNames=axis.labelNames, hidden=axis.hidden, # Subset range minimum=max(range.minimum, axis.minimum), default=range.default or axis.default, maximum=min(range.maximum, axis.maximum), map=[ (user, design) for user, design in axis.map if range.minimum <= user <= range.maximum ], # Don't include STAT info axisOrdering=None, axisLabels=None, ) ) subDoc.axisMappings = mappings = [] subDocAxes = {axis.name for axis in subDoc.axes} for mapping in doc.axisMappings: if not all(axis in subDocAxes for axis in mapping.inputLocation.keys()): continue if not all(axis in subDocAxes for axis in mapping.outputLocation.keys()): LOGGER.error( "In axis mapping from input %s, some output axes are not in the variable-font: %s", mapping.inputLocation, mapping.outputLocation, ) continue mappingAxes = set() mappingAxes.update(mapping.inputLocation.keys()) mappingAxes.update(mapping.outputLocation.keys()) for axis in doc.axes: if axis.name not in mappingAxes: continue range = userRegion[axis.name] if ( range.minimum != axis.minimum or (range.default is not None and range.default != axis.default) or range.maximum != axis.maximum ): LOGGER.error( "Limiting axis ranges used in <mapping> elements not supported: %s", axis.name, ) continue mappings.append( AxisMappingDescriptor( inputLocation=mapping.inputLocation, outputLocation=mapping.outputLocation, ) ) # Don't include STAT info # subDoc.locationLabels = doc.locationLabels # Rules: subset them based on conditions designRegion = userRegionToDesignRegion(doc, userRegion) subDoc.rules = _subsetRulesBasedOnConditions(doc.rules, designRegion) subDoc.rulesProcessingLast = doc.rulesProcessingLast # Sources: keep only the ones that fall within the kept axis ranges for source in doc.sources: if not locationInRegion(doc.map_backward(source.designLocation), userRegion): continue subDoc.addSource( SourceDescriptor( filename=source.filename, path=source.path, font=source.font, name=source.name, designLocation=_filterLocation( userRegion, maybeExpandDesignLocation(source) ), layerName=source.layerName, familyName=source.familyName, styleName=source.styleName, muteKerning=source.muteKerning, muteInfo=source.muteInfo, mutedGlyphNames=source.mutedGlyphNames, ) ) # Copy family name translations from the old default source to the new default vfDefault = subDoc.findDefault() oldDefault = doc.findDefault() if vfDefault is not None and oldDefault is not None: vfDefault.localisedFamilyName = oldDefault.localisedFamilyName # Variable fonts: keep only the ones that fall within the kept axis ranges if keepVFs: # Note: call getVariableFont() to make the implicit VFs explicit for vf in doc.getVariableFonts(): vfUserRegion = getVFUserRegion(doc, vf) if regionInRegion(vfUserRegion, userRegion): subDoc.addVariableFont( VariableFontDescriptor( name=vf.name, filename=vf.filename, axisSubsets=[ axisSubset for axisSubset in vf.axisSubsets if isinstance(userRegion[axisSubset.name], Range) ], lib=vf.lib, ) ) # Instances: same as Sources + compute 
missing names for instance in doc.instances: if not locationInRegion(instance.getFullUserLocation(doc), userRegion): continue if makeNames: statNames = getStatNames(doc, instance.getFullUserLocation(doc)) familyName = instance.familyName or statNames.familyNames.get("en") styleName = instance.styleName or statNames.styleNames.get("en") subDoc.addInstance( InstanceDescriptor( filename=instance.filename or makeInstanceFilename(doc, instance, statNames), path=instance.path, font=instance.font, name=instance.name or f"{familyName} {styleName}", userLocation={} if expandLocations else instance.userLocation, designLocation=_filterLocation( userRegion, maybeExpandDesignLocation(instance) ), familyName=familyName, styleName=styleName, postScriptFontName=instance.postScriptFontName or statNames.postScriptFontName, styleMapFamilyName=instance.styleMapFamilyName or statNames.styleMapFamilyNames.get("en"), styleMapStyleName=instance.styleMapStyleName or statNames.styleMapStyleName, localisedFamilyName=instance.localisedFamilyName or statNames.familyNames, localisedStyleName=instance.localisedStyleName or statNames.styleNames, localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName or statNames.styleMapFamilyNames, localisedStyleMapStyleName=instance.localisedStyleMapStyleName or {}, lib=instance.lib, ) ) else: subDoc.addInstance( InstanceDescriptor( filename=instance.filename, path=instance.path, font=instance.font, name=instance.name, userLocation={} if expandLocations else instance.userLocation, designLocation=_filterLocation( userRegion, maybeExpandDesignLocation(instance) ), familyName=instance.familyName, styleName=instance.styleName, postScriptFontName=instance.postScriptFontName, styleMapFamilyName=instance.styleMapFamilyName, styleMapStyleName=instance.styleMapStyleName, localisedFamilyName=instance.localisedFamilyName, localisedStyleName=instance.localisedStyleName, localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName, localisedStyleMapStyleName=instance.localisedStyleMapStyleName, lib=instance.lib, ) ) subDoc.lib = doc.lib return subDoc def _conditionSetFrom(conditionSet: List[Dict[str, Any]]) -> ConditionSet: c: Dict[str, Range] = {} for condition in conditionSet: minimum, maximum = condition.get("minimum"), condition.get("maximum") c[condition["name"]] = Range( minimum if minimum is not None else -math.inf, maximum if maximum is not None else math.inf, ) return c def _subsetRulesBasedOnConditions( rules: List[RuleDescriptor], designRegion: Region ) -> List[RuleDescriptor]: # What rules to keep: # - Keep the rule if any conditionset is relevant. # - A conditionset is relevant if all conditions are relevant or it is empty. 
# - A condition is relevant if # - axis is point (C-AP), # - and point in condition's range (C-AP-in) # (in this case remove the condition because it's always true) # - else (C-AP-out) whole conditionset can be discarded (condition false # => conditionset false) # - axis is range (C-AR), # - (C-AR-all) and axis range fully contained in condition range: we can # scrap the condition because it's always true # - (C-AR-inter) and intersection(axis range, condition range) not empty: # keep the condition with the smaller range (= intersection) # - (C-AR-none) else, whole conditionset can be discarded newRules: List[RuleDescriptor] = [] for rule in rules: newRule: RuleDescriptor = RuleDescriptor( name=rule.name, conditionSets=[], subs=rule.subs ) for conditionset in rule.conditionSets: cs = _conditionSetFrom(conditionset) newConditionset: List[Dict[str, Any]] = [] discardConditionset = False for selectionName, selectionValue in designRegion.items(): # TODO: Ensure that all(key in conditionset for key in region.keys())? if selectionName not in cs: # raise Exception("Selection has different axes than the rules") continue if isinstance(selectionValue, (float, int)): # is point # Case C-AP-in if selectionValue in cs[selectionName]: pass # always matches, conditionset can stay empty for this one. # Case C-AP-out else: discardConditionset = True else: # is range # Case C-AR-all if selectionValue in cs[selectionName]: pass # always matches, conditionset can stay empty for this one. else: intersection = cs[selectionName].intersection(selectionValue) # Case C-AR-inter if intersection is not None: newConditionset.append( { "name": selectionName, "minimum": intersection.minimum, "maximum": intersection.maximum, } ) # Case C-AR-none else: discardConditionset = True if not discardConditionset: newRule.conditionSets.append(newConditionset) if newRule.conditionSets: newRules.append(newRule) return newRules def _filterLocation( userRegion: Region, location: Dict[str, float], ) -> Dict[str, float]: return { name: value for name, value in location.items() if name in userRegion and isinstance(userRegion[name], Range) } PKaZZZ� �xm#m#%fontTools/designspaceLib/statNames.py"""Compute name information for a given location in user-space coordinates using STAT data. This can be used to fill-in automatically the names of an instance: .. 
code:: python instance = doc.instances[0] names = getStatNames(doc, instance.getFullUserLocation(doc)) print(names.styleNames) """ from __future__ import annotations from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import logging from fontTools.designspaceLib import ( AxisDescriptor, AxisLabelDescriptor, DesignSpaceDocument, DesignSpaceDocumentError, DiscreteAxisDescriptor, SimpleLocationDict, SourceDescriptor, ) LOGGER = logging.getLogger(__name__) # TODO(Python 3.8): use Literal # RibbiStyleName = Union[Literal["regular"], Literal["bold"], Literal["italic"], Literal["bold italic"]] RibbiStyle = str BOLD_ITALIC_TO_RIBBI_STYLE = { (False, False): "regular", (False, True): "italic", (True, False): "bold", (True, True): "bold italic", } @dataclass class StatNames: """Name data generated from the STAT table information.""" familyNames: Dict[str, str] styleNames: Dict[str, str] postScriptFontName: Optional[str] styleMapFamilyNames: Dict[str, str] styleMapStyleName: Optional[RibbiStyle] def getStatNames( doc: DesignSpaceDocument, userLocation: SimpleLocationDict ) -> StatNames: """Compute the family, style, PostScript names of the given ``userLocation`` using the document's STAT information. Also computes localizations. If not enough STAT data is available for a given name, either its dict of localized names will be empty (family and style names), or the name will be None (PostScript name). .. versionadded:: 5.0 """ familyNames: Dict[str, str] = {} defaultSource: Optional[SourceDescriptor] = doc.findDefault() if defaultSource is None: LOGGER.warning("Cannot determine default source to look up family name.") elif defaultSource.familyName is None: LOGGER.warning( "Cannot look up family name, assign the 'familyname' attribute to the default source." ) else: familyNames = { "en": defaultSource.familyName, **defaultSource.localisedFamilyName, } styleNames: Dict[str, str] = {} # If a free-standing label matches the location, use it for name generation. label = doc.labelForUserLocation(userLocation) if label is not None: styleNames = {"en": label.name, **label.labelNames} # Otherwise, scour the axis labels for matches. else: # Gather all languages in which at least one translation is provided # Then build names for all these languages, but fallback to English # whenever a translation is missing. 
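# Illustrative sketch, not original library code. Given a document with
# sufficient STAT data, getStatNames() can be queried directly; the axis
# names "weight" and "italic" below are hypothetical:
#
#     names = getStatNames(doc, {"weight": 700, "italic": 0})
#     names.styleNames.get("en")    # e.g. "Bold"
#     names.postScriptFontName      # e.g. "MyFamily-Bold", or None when the
#                                   # STAT data is insufficient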
labels = _getAxisLabelsForUserLocation(doc.axes, userLocation) if labels: languages = set( language for label in labels for language in label.labelNames ) languages.add("en") for language in languages: styleName = " ".join( label.labelNames.get(language, label.defaultName) for label in labels if not label.elidable ) if not styleName and doc.elidedFallbackName is not None: styleName = doc.elidedFallbackName styleNames[language] = styleName if "en" not in familyNames or "en" not in styleNames: # Not enough information to compute PS names of styleMap names return StatNames( familyNames=familyNames, styleNames=styleNames, postScriptFontName=None, styleMapFamilyNames={}, styleMapStyleName=None, ) postScriptFontName = f"{familyNames['en']}-{styleNames['en']}".replace(" ", "") styleMapStyleName, regularUserLocation = _getRibbiStyle(doc, userLocation) styleNamesForStyleMap = styleNames if regularUserLocation != userLocation: regularStatNames = getStatNames(doc, regularUserLocation) styleNamesForStyleMap = regularStatNames.styleNames styleMapFamilyNames = {} for language in set(familyNames).union(styleNames.keys()): familyName = familyNames.get(language, familyNames["en"]) styleName = styleNamesForStyleMap.get(language, styleNamesForStyleMap["en"]) styleMapFamilyNames[language] = (familyName + " " + styleName).strip() return StatNames( familyNames=familyNames, styleNames=styleNames, postScriptFontName=postScriptFontName, styleMapFamilyNames=styleMapFamilyNames, styleMapStyleName=styleMapStyleName, ) def _getSortedAxisLabels( axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]], ) -> Dict[str, list[AxisLabelDescriptor]]: """Returns axis labels sorted by their ordering, with unordered ones appended as they are listed.""" # First, get the axis labels with explicit ordering... sortedAxes = sorted( (axis for axis in axes if axis.axisOrdering is not None), key=lambda a: a.axisOrdering, ) sortedLabels: Dict[str, list[AxisLabelDescriptor]] = { axis.name: axis.axisLabels for axis in sortedAxes } # ... then append the others in the order they appear. # NOTE: This relies on Python 3.7+ dict's preserved insertion order. for axis in axes: if axis.axisOrdering is None: sortedLabels[axis.name] = axis.axisLabels return sortedLabels def _getAxisLabelsForUserLocation( axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]], userLocation: SimpleLocationDict, ) -> list[AxisLabelDescriptor]: labels: list[AxisLabelDescriptor] = [] allAxisLabels = _getSortedAxisLabels(axes) if allAxisLabels.keys() != userLocation.keys(): LOGGER.warning( f"Mismatch between user location '{userLocation.keys()}' and available " f"labels for '{allAxisLabels.keys()}'." ) for axisName, axisLabels in allAxisLabels.items(): userValue = userLocation[axisName] label: Optional[AxisLabelDescriptor] = next( ( l for l in axisLabels if l.userValue == userValue or ( l.userMinimum is not None and l.userMaximum is not None and l.userMinimum <= userValue <= l.userMaximum ) ), None, ) if label is None: LOGGER.debug( f"Document needs a label for axis '{axisName}', user value '{userValue}'." ) else: labels.append(label) return labels def _getRibbiStyle( self: DesignSpaceDocument, userLocation: SimpleLocationDict ) -> Tuple[RibbiStyle, SimpleLocationDict]: """Compute the RIBBI style name of the given user location, return the location of the matching Regular in the RIBBI group. .. 
versionadded:: 5.0 """ regularUserLocation = {} axes_by_tag = {axis.tag: axis for axis in self.axes} bold: bool = False italic: bool = False axis = axes_by_tag.get("wght") if axis is not None: for regular_label in axis.axisLabels: if ( regular_label.linkedUserValue == userLocation[axis.name] # In the "recursive" case where both the Regular has # linkedUserValue pointing the Bold, and the Bold has # linkedUserValue pointing to the Regular, only consider the # first case: Regular (e.g. 400) has linkedUserValue pointing to # Bold (e.g. 700, higher than Regular) and regular_label.userValue < regular_label.linkedUserValue ): regularUserLocation[axis.name] = regular_label.userValue bold = True break axis = axes_by_tag.get("ital") or axes_by_tag.get("slnt") if axis is not None: for upright_label in axis.axisLabels: if ( upright_label.linkedUserValue == userLocation[axis.name] # In the "recursive" case where both the Upright has # linkedUserValue pointing the Italic, and the Italic has # linkedUserValue pointing to the Upright, only consider the # first case: Upright (e.g. ital=0, slant=0) has # linkedUserValue pointing to Italic (e.g ital=1, slant=-12 or # slant=12 for backwards italics, in any case higher than # Upright in absolute value, hence the abs() below. and abs(upright_label.userValue) < abs(upright_label.linkedUserValue) ): regularUserLocation[axis.name] = upright_label.userValue italic = True break return BOLD_ITALIC_TO_RIBBI_STYLE[bold, italic], { **userLocation, **regularUserLocation, } PKaZZZĮ����!fontTools/designspaceLib/types.pyfrom __future__ import annotations from dataclasses import dataclass from typing import Dict, List, Optional, Union, cast from fontTools.designspaceLib import ( AxisDescriptor, DesignSpaceDocument, DesignSpaceDocumentError, RangeAxisSubsetDescriptor, SimpleLocationDict, ValueAxisSubsetDescriptor, VariableFontDescriptor, ) def clamp(value, minimum, maximum): return min(max(value, minimum), maximum) @dataclass class Range: minimum: float """Inclusive minimum of the range.""" maximum: float """Inclusive maximum of the range.""" default: float = 0 """Default value""" def __post_init__(self): self.minimum, self.maximum = sorted((self.minimum, self.maximum)) self.default = clamp(self.default, self.minimum, self.maximum) def __contains__(self, value: Union[float, Range]) -> bool: if isinstance(value, Range): return self.minimum <= value.minimum and value.maximum <= self.maximum return self.minimum <= value <= self.maximum def intersection(self, other: Range) -> Optional[Range]: if self.maximum < other.minimum or self.minimum > other.maximum: return None else: return Range( max(self.minimum, other.minimum), min(self.maximum, other.maximum), self.default, # We don't care about the default in this use-case ) # A region selection is either a range or a single value, as a Designspace v5 # axis-subset element only allows a single discrete value or a range for a # variable-font element. Region = Dict[str, Union[Range, float]] # A conditionset is a set of named ranges. ConditionSet = Dict[str, Range] # A rule is a list of conditionsets where any has to be relevant for the whole rule to be relevant. 
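# Illustrative sketch, not original library code: the Range semantics above
# in action. __contains__ accepts both points and whole ranges, and
# intersection() returns None for disjoint ranges, carrying over self's
# default value.
def _demoRangeSemantics() -> None:
    wght = Range(100, 900, default=400)
    assert 400 in wght  # point containment
    assert Range(300, 700) in wght  # sub-range containment
    inter = wght.intersection(Range(700, 1000))
    assert (inter.minimum, inter.maximum, inter.default) == (700, 900, 400)
    assert wght.intersection(Range(1000, 2000)) is None  # disjoint ranges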
Rule = List[ConditionSet] Rules = Dict[str, Rule] def locationInRegion(location: SimpleLocationDict, region: Region) -> bool: for name, value in location.items(): if name not in region: return False regionValue = region[name] if isinstance(regionValue, (float, int)): if value != regionValue: return False else: if value not in regionValue: return False return True def regionInRegion(region: Region, superRegion: Region) -> bool: for name, value in region.items(): if not name in superRegion: return False superValue = superRegion[name] if isinstance(superValue, (float, int)): if value != superValue: return False else: if value not in superValue: return False return True def userRegionToDesignRegion(doc: DesignSpaceDocument, userRegion: Region) -> Region: designRegion = {} for name, value in userRegion.items(): axis = doc.getAxis(name) if axis is None: raise DesignSpaceDocumentError( f"Cannot find axis named '{name}' for region." ) if isinstance(value, (float, int)): designRegion[name] = axis.map_forward(value) else: designRegion[name] = Range( axis.map_forward(value.minimum), axis.map_forward(value.maximum), axis.map_forward(value.default), ) return designRegion def getVFUserRegion(doc: DesignSpaceDocument, vf: VariableFontDescriptor) -> Region: vfUserRegion: Region = {} # For each axis, 2 cases: # - it has a range = it's an axis in the VF DS # - it's a single location = use it to know which rules should apply in the VF for axisSubset in vf.axisSubsets: axis = doc.getAxis(axisSubset.name) if axis is None: raise DesignSpaceDocumentError( f"Cannot find axis named '{axisSubset.name}' for variable font '{vf.name}'." ) if hasattr(axisSubset, "userMinimum"): # Mypy doesn't support narrowing union types via hasattr() # TODO(Python 3.10): use TypeGuard # https://mypy.readthedocs.io/en/stable/type_narrowing.html axisSubset = cast(RangeAxisSubsetDescriptor, axisSubset) if not hasattr(axis, "minimum"): raise DesignSpaceDocumentError( f"Cannot select a range over '{axis.name}' for variable font '{vf.name}' " "because it's a discrete axis, use only 'userValue' instead." ) axis = cast(AxisDescriptor, axis) vfUserRegion[axis.name] = Range( max(axisSubset.userMinimum, axis.minimum), min(axisSubset.userMaximum, axis.maximum), axisSubset.userDefault or axis.default, ) else: axisSubset = cast(ValueAxisSubsetDescriptor, axisSubset) vfUserRegion[axis.name] = axisSubset.userValue # Any axis not mentioned explicitly has a single location = default value for axis in doc.axes: if axis.name not in vfUserRegion: assert isinstance( axis.default, (int, float) ), f"Axis '{axis.name}' has no valid default value." 
vfUserRegion[axis.name] = axis.default return vfUserRegion PKaZZZ �C�� � fontTools/encodings/MacRoman.pyMacRoman = [ "NUL", "Eth", "eth", "Lslash", "lslash", "Scaron", "scaron", "Yacute", "yacute", "HT", "LF", "Thorn", "thorn", "CR", "Zcaron", "zcaron", "DLE", "DC1", "DC2", "DC3", "DC4", "onehalf", "onequarter", "onesuperior", "threequarters", "threesuperior", "twosuperior", "brokenbar", "minus", "multiply", "RS", "US", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quotesingle", "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", "less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "grave", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", "DEL", "Adieresis", "Aring", "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis", "aacute", "agrave", "acircumflex", "adieresis", "atilde", "aring", "ccedilla", "eacute", "egrave", "ecircumflex", "edieresis", "iacute", "igrave", "icircumflex", "idieresis", "ntilde", "oacute", "ograve", "ocircumflex", "odieresis", "otilde", "uacute", "ugrave", "ucircumflex", "udieresis", "dagger", "degree", "cent", "sterling", "section", "bullet", "paragraph", "germandbls", "registered", "copyright", "trademark", "acute", "dieresis", "notequal", "AE", "Oslash", "infinity", "plusminus", "lessequal", "greaterequal", "yen", "mu", "partialdiff", "summation", "product", "pi", "integral", "ordfeminine", "ordmasculine", "Omega", "ae", "oslash", "questiondown", "exclamdown", "logicalnot", "radical", "florin", "approxequal", "Delta", "guillemotleft", "guillemotright", "ellipsis", "nbspace", "Agrave", "Atilde", "Otilde", "OE", "oe", "endash", "emdash", "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide", "lozenge", "ydieresis", "Ydieresis", "fraction", "currency", "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl", "periodcentered", "quotesinglbase", "quotedblbase", "perthousand", "Acircumflex", "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex", "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave", "dotlessi", "circumflex", "tilde", "macron", "breve", "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", ] PKaZZZD�)�� � 'fontTools/encodings/StandardEncoding.pyStandardEncoding = [ ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", "less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "bracketleft", 
"backslash", "bracketright", "asciicircum", "underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", "exclamdown", "cent", "sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", ".notdef", "endash", "dagger", "daggerdbl", "periodcentered", ".notdef", "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", "perthousand", ".notdef", "questiondown", ".notdef", "grave", "acute", "circumflex", "tilde", "macron", "breve", "dotaccent", "dieresis", ".notdef", "ring", "cedilla", ".notdef", "hungarumlaut", "ogonek", "caron", "emdash", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", "AE", ".notdef", "ordfeminine", ".notdef", ".notdef", ".notdef", ".notdef", "Lslash", "Oslash", "OE", "ordmasculine", ".notdef", ".notdef", ".notdef", ".notdef", ".notdef", "ae", ".notdef", ".notdef", ".notdef", "dotlessi", ".notdef", ".notdef", "lslash", "oslash", "oe", "germandbls", ".notdef", ".notdef", ".notdef", ".notdef", ] PKaZZZhXXKKfontTools/encodings/__init__.py"""Empty __init__.py file to signal Python this directory is a package.""" PKaZZZ�`�qqqfontTools/encodings/codecs.py"""Extend the Python codecs module with a few encodings that are used in OpenType (name table) but missing from Python. 
See https://github.com/fonttools/fonttools/issues/236 for details.""" import codecs import encodings class ExtendCodec(codecs.Codec): def __init__(self, name, base_encoding, mapping): self.name = name self.base_encoding = base_encoding self.mapping = mapping self.reverse = {v: k for k, v in mapping.items()} self.max_len = max(len(v) for v in mapping.values()) self.info = codecs.CodecInfo( name=self.name, encode=self.encode, decode=self.decode ) codecs.register_error(name, self.error) def _map(self, mapper, output_type, exc_type, input, errors): base_error_handler = codecs.lookup_error(errors) length = len(input) out = output_type() while input: # first try to use self.error as the error handler try: part = mapper(input, self.base_encoding, errors=self.name) out += part break # All converted except exc_type as e: # else convert the correct part, handle error as requested and continue out += mapper(input[: e.start], self.base_encoding, self.name) replacement, pos = base_error_handler(e) out += replacement input = input[pos:] return out, length def encode(self, input, errors="strict"): return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors) def decode(self, input, errors="strict"): return self._map(codecs.decode, str, UnicodeDecodeError, input, errors) def error(self, e): if isinstance(e, UnicodeDecodeError): for end in range(e.start + 1, e.end + 1): s = e.object[e.start : end] if s in self.mapping: return self.mapping[s], end elif isinstance(e, UnicodeEncodeError): for end in range(e.start + 1, e.start + self.max_len + 1): s = e.object[e.start : end] if s in self.reverse: return self.reverse[s], end e.encoding = self.name raise e _extended_encodings = { "x_mac_japanese_ttx": ( "shift_jis", { b"\xFC": chr(0x007C), b"\x7E": chr(0x007E), b"\x80": chr(0x005C), b"\xA0": chr(0x00A0), b"\xFD": chr(0x00A9), b"\xFE": chr(0x2122), b"\xFF": chr(0x2026), }, ), "x_mac_trad_chinese_ttx": ( "big5", { b"\x80": chr(0x005C), b"\xA0": chr(0x00A0), b"\xFD": chr(0x00A9), b"\xFE": chr(0x2122), b"\xFF": chr(0x2026), }, ), "x_mac_korean_ttx": ( "euc_kr", { b"\x80": chr(0x00A0), b"\x81": chr(0x20A9), b"\x82": chr(0x2014), b"\x83": chr(0x00A9), b"\xFE": chr(0x2122), b"\xFF": chr(0x2026), }, ), "x_mac_simp_chinese_ttx": ( "gb2312", { b"\x80": chr(0x00FC), b"\xA0": chr(0x00A0), b"\xFD": chr(0x00A9), b"\xFE": chr(0x2122), b"\xFF": chr(0x2026), }, ), } _cache = {} def search_function(name): name = encodings.normalize_encoding(name) # Rather undocumented... if name in _extended_encodings: if name not in _cache: base_encoding, mapping = _extended_encodings[name] assert name[-4:] == "_ttx" # Python 2 didn't have any of the encodings that we are implementing # in this file. Python 3 added aliases for the East Asian ones, mapping # them "temporarily" to the same base encoding as us, with a comment # suggesting that full implementation will appear some time later. # As such, try the Python version of the x_mac_... first, if that is found, # use *that* as our base encoding. This would make our encoding upgrade # to the full encoding when and if Python finally implements that. 
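# Illustrative sketch, not original library code: once search_function is
# registered with codecs.register() below, the extended encodings behave
# like any stdlib codec:
#
#     b"\x80".decode("x_mac_japanese_ttx")        # -> "\\" (0x80 maps to backslash)
#     "\u2122".encode("x_mac_trad_chinese_ttx")   # -> b"\xFE" (trademark sign)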
# http://bugs.python.org/issue24041 base_encodings = [name[:-4], base_encoding] for base_encoding in base_encodings: try: codecs.lookup(base_encoding) except LookupError: continue _cache[name] = ExtendCodec(name, base_encoding, mapping) break return _cache[name].info return None codecs.register(search_function) PKaZZZr!u��fontTools/feaLib/__init__.py"""fontTools.feaLib -- a package for dealing with OpenType feature files.""" # The structure of OpenType feature files is defined here: # http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html PKaZZZ�E����fontTools/feaLib/__main__.pyfrom fontTools.ttLib import TTFont from fontTools.feaLib.builder import addOpenTypeFeatures, Builder from fontTools.feaLib.error import FeatureLibError from fontTools import configLogger from fontTools.misc.cliTools import makeOutputFileName import sys import argparse import logging log = logging.getLogger("fontTools.feaLib") def main(args=None): """Add features from a feature file (.fea) into an OTF font""" parser = argparse.ArgumentParser( description="Use fontTools to compile OpenType feature files (*.fea)." ) parser.add_argument( "input_fea", metavar="FEATURES", help="Path to the feature file" ) parser.add_argument( "input_font", metavar="INPUT_FONT", help="Path to the input font" ) parser.add_argument( "-o", "--output", dest="output_font", metavar="OUTPUT_FONT", help="Path to the output font.", ) parser.add_argument( "-t", "--tables", metavar="TABLE_TAG", choices=Builder.supportedTables, nargs="+", help="Specify the table(s) to be built.", ) parser.add_argument( "-d", "--debug", action="store_true", help="Add source-level debugging information to font.", ) parser.add_argument( "-v", "--verbose", help="Increase the logger verbosity. Multiple -v " "options are allowed.", action="count", default=0, ) parser.add_argument( "--traceback", help="show traceback for exceptions.", action="store_true" ) options = parser.parse_args(args) levels = ["WARNING", "INFO", "DEBUG"] configLogger(level=levels[min(len(levels) - 1, options.verbose)]) output_font = options.output_font or makeOutputFileName(options.input_font) log.info("Compiling features to '%s'" % (output_font)) font = TTFont(options.input_font) try: addOpenTypeFeatures( font, options.input_fea, tables=options.tables, debug=options.debug ) except FeatureLibError as e: if options.traceback: raise log.error(e) sys.exit(1) font.save(output_font) if __name__ == "__main__": sys.exit(main()) PKaZZZ��q�I I fontTools/feaLib/ast.pyfrom fontTools.feaLib.error import FeatureLibError from fontTools.feaLib.location import FeatureLibLocation from fontTools.misc.encodingTools import getEncoding from fontTools.misc.textTools import byteord, tobytes from collections import OrderedDict import itertools SHIFT = " " * 4 __all__ = [ "Element", "FeatureFile", "Comment", "GlyphName", "GlyphClass", "GlyphClassName", "MarkClassName", "AnonymousBlock", "Block", "FeatureBlock", "NestedBlock", "LookupBlock", "GlyphClassDefinition", "GlyphClassDefStatement", "MarkClass", "MarkClassDefinition", "AlternateSubstStatement", "Anchor", "AnchorDefinition", "AttachStatement", "AxisValueLocationStatement", "BaseAxis", "CVParametersNameStatement", "ChainContextPosStatement", "ChainContextSubstStatement", "CharacterStatement", "ConditionsetStatement", "CursivePosStatement", "ElidedFallbackName", "ElidedFallbackNameID", "Expression", "FeatureNameStatement", "FeatureReferenceStatement", "FontRevisionStatement", "HheaField", "IgnorePosStatement", "IgnoreSubstStatement", "IncludeStatement", 
"LanguageStatement", "LanguageSystemStatement", "LigatureCaretByIndexStatement", "LigatureCaretByPosStatement", "LigatureSubstStatement", "LookupFlagStatement", "LookupReferenceStatement", "MarkBasePosStatement", "MarkLigPosStatement", "MarkMarkPosStatement", "MultipleSubstStatement", "NameRecord", "OS2Field", "PairPosStatement", "ReverseChainSingleSubstStatement", "ScriptStatement", "SinglePosStatement", "SingleSubstStatement", "SizeParameters", "Statement", "STATAxisValueStatement", "STATDesignAxisStatement", "STATNameStatement", "SubtableStatement", "TableBlock", "ValueRecord", "ValueRecordDefinition", "VheaField", ] def deviceToString(device): if device is None: return "<device NULL>" else: return "<device %s>" % ", ".join("%d %d" % t for t in device) fea_keywords = set( [ "anchor", "anchordef", "anon", "anonymous", "by", "contour", "cursive", "device", "enum", "enumerate", "excludedflt", "exclude_dflt", "feature", "from", "ignore", "ignorebaseglyphs", "ignoreligatures", "ignoremarks", "include", "includedflt", "include_dflt", "language", "languagesystem", "lookup", "lookupflag", "mark", "markattachmenttype", "markclass", "nameid", "null", "parameters", "pos", "position", "required", "righttoleft", "reversesub", "rsub", "script", "sub", "substitute", "subtable", "table", "usemarkfilteringset", "useextension", "valuerecorddef", "base", "gdef", "head", "hhea", "name", "vhea", "vmtx", ] ) def asFea(g): if hasattr(g, "asFea"): return g.asFea() elif isinstance(g, tuple) and len(g) == 2: return asFea(g[0]) + " - " + asFea(g[1]) # a range elif g.lower() in fea_keywords: return "\\" + g else: return g class Element(object): """A base class representing "something" in a feature file.""" def __init__(self, location=None): #: location of this element as a `FeatureLibLocation` object. if location and not isinstance(location, FeatureLibLocation): location = FeatureLibLocation(*location) self.location = location def build(self, builder): pass def asFea(self, indent=""): """Returns this element as a string of feature code. For block-type elements (such as :class:`FeatureBlock`), the `indent` string is added to the start of each line in the output.""" raise NotImplementedError def __str__(self): return self.asFea() class Statement(Element): pass class Expression(Element): pass class Comment(Element): """A comment in a feature file.""" def __init__(self, text, location=None): super(Comment, self).__init__(location) #: Text of the comment self.text = text def asFea(self, indent=""): return self.text class NullGlyph(Expression): """The NULL glyph, used in glyph deletion substitutions.""" def __init__(self, location=None): Expression.__init__(self, location) #: The name itself as a string def glyphSet(self): """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" return () def asFea(self, indent=""): return "NULL" class GlyphName(Expression): """A single glyph name, such as ``cedilla``.""" def __init__(self, glyph, location=None): Expression.__init__(self, location) #: The name itself as a string self.glyph = glyph def glyphSet(self): """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" return (self.glyph,) def asFea(self, indent=""): return asFea(self.glyph) class GlyphClass(Expression): """A glyph class, such as ``[acute cedilla grave]``.""" def __init__(self, glyphs=None, location=None): Expression.__init__(self, location) #: The list of glyphs in this class, as :class:`GlyphName` objects. 
self.glyphs = glyphs if glyphs is not None else [] self.original = [] self.curr = 0 def glyphSet(self): """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" return tuple(self.glyphs) def asFea(self, indent=""): if len(self.original): if self.curr < len(self.glyphs): self.original.extend(self.glyphs[self.curr :]) self.curr = len(self.glyphs) return "[" + " ".join(map(asFea, self.original)) + "]" else: return "[" + " ".join(map(asFea, self.glyphs)) + "]" def extend(self, glyphs): """Add a list of :class:`GlyphName` objects to the class.""" self.glyphs.extend(glyphs) def append(self, glyph): """Add a single :class:`GlyphName` object to the class.""" self.glyphs.append(glyph) def add_range(self, start, end, glyphs): """Add a range (e.g. ``A-Z``) to the class. ``start`` and ``end`` are either :class:`GlyphName` objects or strings representing the start and end glyphs in the class, and ``glyphs`` is the full list of :class:`GlyphName` objects in the range.""" if self.curr < len(self.glyphs): self.original.extend(self.glyphs[self.curr :]) self.original.append((start, end)) self.glyphs.extend(glyphs) self.curr = len(self.glyphs) def add_cid_range(self, start, end, glyphs): """Add a range to the class by glyph ID. ``start`` and ``end`` are the initial and final IDs, and ``glyphs`` is the full list of :class:`GlyphName` objects in the range.""" if self.curr < len(self.glyphs): self.original.extend(self.glyphs[self.curr :]) self.original.append(("\\{}".format(start), "\\{}".format(end))) self.glyphs.extend(glyphs) self.curr = len(self.glyphs) def add_class(self, gc): """Add glyphs from the given :class:`GlyphClassName` object to the class.""" if self.curr < len(self.glyphs): self.original.extend(self.glyphs[self.curr :]) self.original.append(gc) self.glyphs.extend(gc.glyphSet()) self.curr = len(self.glyphs) class GlyphClassName(Expression): """A glyph class name, such as ``@FRENCH_MARKS``. This must be instantiated with a :class:`GlyphClassDefinition` object.""" def __init__(self, glyphclass, location=None): Expression.__init__(self, location) assert isinstance(glyphclass, GlyphClassDefinition) self.glyphclass = glyphclass def glyphSet(self): """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" return tuple(self.glyphclass.glyphSet()) def asFea(self, indent=""): return "@" + self.glyphclass.name class MarkClassName(Expression): """A mark class name, such as ``@FRENCH_MARKS`` defined with ``markClass``. 
This must be instantiated with a :class:`MarkClass` object.""" def __init__(self, markClass, location=None): Expression.__init__(self, location) assert isinstance(markClass, MarkClass) self.markClass = markClass def glyphSet(self): """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" return self.markClass.glyphSet() def asFea(self, indent=""): return "@" + self.markClass.name class AnonymousBlock(Statement): """An anonymous data block.""" def __init__(self, tag, content, location=None): Statement.__init__(self, location) self.tag = tag #: string containing the block's "tag" self.content = content #: block data as string def asFea(self, indent=""): res = "anon {} {{\n".format(self.tag) res += self.content res += "}} {};\n\n".format(self.tag) return res class Block(Statement): """A block of statements: feature, lookup, etc.""" def __init__(self, location=None): Statement.__init__(self, location) self.statements = [] #: Statements contained in the block def build(self, builder): """When handed a 'builder' object of comparable interface to :class:`fontTools.feaLib.builder`, walks the statements in this block, calling the builder callbacks.""" for s in self.statements: s.build(builder) def asFea(self, indent=""): indent += SHIFT return ( indent + ("\n" + indent).join([s.asFea(indent=indent) for s in self.statements]) + "\n" ) class FeatureFile(Block): """The top-level element of the syntax tree, containing the whole feature file in its ``statements`` attribute.""" def __init__(self): Block.__init__(self, location=None) self.markClasses = {} # name --> ast.MarkClass def asFea(self, indent=""): return "\n".join(s.asFea(indent=indent) for s in self.statements) class FeatureBlock(Block): """A named feature block.""" def __init__(self, name, use_extension=False, location=None): Block.__init__(self, location) self.name, self.use_extension = name, use_extension def build(self, builder): """Call the ``start_feature`` callback on the builder object, visit all the statements in this feature, and then call ``end_feature``.""" # TODO(sascha): Handle use_extension. builder.start_feature(self.location, self.name) # language exclude_dflt statements modify builder.features_ # limit them to this block with temporary builder.features_ features = builder.features_ builder.features_ = {} Block.build(self, builder) for key, value in builder.features_.items(): features.setdefault(key, []).extend(value) builder.features_ = features builder.end_feature() def asFea(self, indent=""): res = indent + "feature %s " % self.name.strip() if self.use_extension: res += "useExtension " res += "{\n" res += Block.asFea(self, indent=indent) res += indent + "} %s;\n" % self.name.strip() return res class NestedBlock(Block): """A block inside another block, for example when found inside a ``cvParameters`` block.""" def __init__(self, tag, block_name, location=None): Block.__init__(self, location) self.tag = tag self.block_name = block_name def build(self, builder): Block.build(self, builder) if self.block_name == "ParamUILabelNameID": builder.add_to_cv_num_named_params(self.tag) def asFea(self, indent=""): res = "{}{} {{\n".format(indent, self.block_name) res += Block.asFea(self, indent=indent) res += "{}}};\n".format(indent) return res class LookupBlock(Block): """A named lookup, containing ``statements``.""" def __init__(self, name, use_extension=False, location=None): Block.__init__(self, location) self.name, self.use_extension = name, use_extension def build(self, builder): # TODO(sascha): Handle use_extension. 
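# Illustrative sketch, not original library code: blocks and statements can
# be composed by hand and serialized back to feature syntax with asFea():
#
#     doc = FeatureFile()
#     liga = FeatureBlock("liga")
#     liga.statements.append(Comment("# rules would go here"))
#     doc.statements.append(liga)
#     doc.asFea()
#     # feature liga {
#     #     # rules would go here
#     # } liga;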
builder.start_lookup_block(self.location, self.name) Block.build(self, builder) builder.end_lookup_block() def asFea(self, indent=""): res = "lookup {} ".format(self.name) if self.use_extension: res += "useExtension " res += "{\n" res += Block.asFea(self, indent=indent) res += "{}}} {};\n".format(indent, self.name) return res class TableBlock(Block): """A ``table ... { }`` block.""" def __init__(self, name, location=None): Block.__init__(self, location) self.name = name def asFea(self, indent=""): res = "table {} {{\n".format(self.name.strip()) res += super(TableBlock, self).asFea(indent=indent) res += "}} {};\n".format(self.name.strip()) return res class GlyphClassDefinition(Statement): """Example: ``@UPPERCASE = [A-Z];``.""" def __init__(self, name, glyphs, location=None): Statement.__init__(self, location) self.name = name #: class name as a string, without initial ``@`` self.glyphs = glyphs #: a :class:`GlyphClass` object def glyphSet(self): """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" return tuple(self.glyphs.glyphSet()) def asFea(self, indent=""): return "@" + self.name + " = " + self.glyphs.asFea() + ";" class GlyphClassDefStatement(Statement): """Example: ``GlyphClassDef @UPPERCASE, [B], [C], [D];``. The parameters must be either :class:`GlyphClass` or :class:`GlyphClassName` objects, or ``None``.""" def __init__( self, baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=None ): Statement.__init__(self, location) self.baseGlyphs, self.markGlyphs = (baseGlyphs, markGlyphs) self.ligatureGlyphs = ligatureGlyphs self.componentGlyphs = componentGlyphs def build(self, builder): """Calls the builder's ``add_glyphClassDef`` callback.""" base = self.baseGlyphs.glyphSet() if self.baseGlyphs else tuple() liga = self.ligatureGlyphs.glyphSet() if self.ligatureGlyphs else tuple() mark = self.markGlyphs.glyphSet() if self.markGlyphs else tuple() comp = self.componentGlyphs.glyphSet() if self.componentGlyphs else tuple() builder.add_glyphClassDef(self.location, base, liga, mark, comp) def asFea(self, indent=""): return "GlyphClassDef {}, {}, {}, {};".format( self.baseGlyphs.asFea() if self.baseGlyphs else "", self.ligatureGlyphs.asFea() if self.ligatureGlyphs else "", self.markGlyphs.asFea() if self.markGlyphs else "", self.componentGlyphs.asFea() if self.componentGlyphs else "", ) class MarkClass(object): """One `or more` ``markClass`` statements for the same mark class. While glyph classes can be defined only once, the feature file format allows expanding mark classes with multiple definitions, each using different glyphs and anchors. The following are two ``MarkClassDefinitions`` for the same ``MarkClass``:: markClass [acute grave] <anchor 350 800> @FRENCH_ACCENTS; markClass [cedilla] <anchor 350 -200> @FRENCH_ACCENTS; The ``MarkClass`` object is therefore just a container for a list of :class:`MarkClassDefinition` statements. 
""" def __init__(self, name): self.name = name self.definitions = [] self.glyphs = OrderedDict() # glyph --> ast.MarkClassDefinitions def addDefinition(self, definition): """Add a :class:`MarkClassDefinition` statement to this mark class.""" assert isinstance(definition, MarkClassDefinition) self.definitions.append(definition) for glyph in definition.glyphSet(): if glyph in self.glyphs: otherLoc = self.glyphs[glyph].location if otherLoc is None: end = "" else: end = f" at {otherLoc}" raise FeatureLibError( "Glyph %s already defined%s" % (glyph, end), definition.location ) self.glyphs[glyph] = definition def glyphSet(self): """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" return tuple(self.glyphs.keys()) def asFea(self, indent=""): res = "\n".join(d.asFea() for d in self.definitions) return res class MarkClassDefinition(Statement): """A single ``markClass`` statement. The ``markClass`` should be a :class:`MarkClass` object, the ``anchor`` an :class:`Anchor` object, and the ``glyphs`` parameter should be a `glyph-containing object`_ . Example: .. code:: python mc = MarkClass("FRENCH_ACCENTS") mc.addDefinition( MarkClassDefinition(mc, Anchor(350, 800), GlyphClass([ GlyphName("acute"), GlyphName("grave") ]) ) ) mc.addDefinition( MarkClassDefinition(mc, Anchor(350, -200), GlyphClass([ GlyphName("cedilla") ]) ) ) mc.asFea() # markClass [acute grave] <anchor 350 800> @FRENCH_ACCENTS; # markClass [cedilla] <anchor 350 -200> @FRENCH_ACCENTS; """ def __init__(self, markClass, anchor, glyphs, location=None): Statement.__init__(self, location) assert isinstance(markClass, MarkClass) assert isinstance(anchor, Anchor) and isinstance(glyphs, Expression) self.markClass, self.anchor, self.glyphs = markClass, anchor, glyphs def glyphSet(self): """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" return self.glyphs.glyphSet() def asFea(self, indent=""): return "markClass {} {} @{};".format( self.glyphs.asFea(), self.anchor.asFea(), self.markClass.name ) class AlternateSubstStatement(Statement): """A ``sub ... from ...`` statement. ``prefix``, ``glyph``, ``suffix`` and ``replacement`` should be lists of `glyph-containing objects`_. ``glyph`` should be a `one element list`.""" def __init__(self, prefix, glyph, suffix, replacement, location=None): Statement.__init__(self, location) self.prefix, self.glyph, self.suffix = (prefix, glyph, suffix) self.replacement = replacement def build(self, builder): """Calls the builder's ``add_alternate_subst`` callback.""" glyph = self.glyph.glyphSet() assert len(glyph) == 1, glyph glyph = list(glyph)[0] prefix = [p.glyphSet() for p in self.prefix] suffix = [s.glyphSet() for s in self.suffix] replacement = self.replacement.glyphSet() builder.add_alternate_subst(self.location, prefix, glyph, suffix, replacement) def asFea(self, indent=""): res = "sub " if len(self.prefix) or len(self.suffix): if len(self.prefix): res += " ".join(map(asFea, self.prefix)) + " " res += asFea(self.glyph) + "'" # even though we really only use 1 if len(self.suffix): res += " " + " ".join(map(asFea, self.suffix)) else: res += asFea(self.glyph) res += " from " res += asFea(self.replacement) res += ";" return res class Anchor(Expression): """An ``Anchor`` element, used inside a ``pos`` rule. If a ``name`` is given, this will be used in preference to the coordinates. Other values should be integer. 
""" def __init__( self, x, y, name=None, contourpoint=None, xDeviceTable=None, yDeviceTable=None, location=None, ): Expression.__init__(self, location) self.name = name self.x, self.y, self.contourpoint = x, y, contourpoint self.xDeviceTable, self.yDeviceTable = xDeviceTable, yDeviceTable def asFea(self, indent=""): if self.name is not None: return "<anchor {}>".format(self.name) res = "<anchor {} {}".format(self.x, self.y) if self.contourpoint: res += " contourpoint {}".format(self.contourpoint) if self.xDeviceTable or self.yDeviceTable: res += " " res += deviceToString(self.xDeviceTable) res += " " res += deviceToString(self.yDeviceTable) res += ">" return res class AnchorDefinition(Statement): """A named anchor definition. (2.e.viii). ``name`` should be a string.""" def __init__(self, name, x, y, contourpoint=None, location=None): Statement.__init__(self, location) self.name, self.x, self.y, self.contourpoint = name, x, y, contourpoint def asFea(self, indent=""): res = "anchorDef {} {}".format(self.x, self.y) if self.contourpoint: res += " contourpoint {}".format(self.contourpoint) res += " {};".format(self.name) return res class AttachStatement(Statement): """A ``GDEF`` table ``Attach`` statement.""" def __init__(self, glyphs, contourPoints, location=None): Statement.__init__(self, location) self.glyphs = glyphs #: A `glyph-containing object`_ self.contourPoints = contourPoints #: A list of integer contour points def build(self, builder): """Calls the builder's ``add_attach_points`` callback.""" glyphs = self.glyphs.glyphSet() builder.add_attach_points(self.location, glyphs, self.contourPoints) def asFea(self, indent=""): return "Attach {} {};".format( self.glyphs.asFea(), " ".join(str(c) for c in self.contourPoints) ) class ChainContextPosStatement(Statement): r"""A chained contextual positioning statement. ``prefix``, ``glyphs``, and ``suffix`` should be lists of `glyph-containing objects`_ . ``lookups`` should be a list of elements representing what lookups to apply at each glyph position. Each element should be a :class:`LookupBlock` to apply a single chaining lookup at the given position, a list of :class:`LookupBlock`\ s to apply multiple lookups, or ``None`` to apply no lookup. 
The length of the outer list should equal the length of ``glyphs``; the inner lists can be of variable length.""" def __init__(self, prefix, glyphs, suffix, lookups, location=None): Statement.__init__(self, location) self.prefix, self.glyphs, self.suffix = prefix, glyphs, suffix self.lookups = list(lookups) for i, lookup in enumerate(lookups): if lookup: try: (_ for _ in lookup) except TypeError: self.lookups[i] = [lookup] def build(self, builder): """Calls the builder's ``add_chain_context_pos`` callback.""" prefix = [p.glyphSet() for p in self.prefix] glyphs = [g.glyphSet() for g in self.glyphs] suffix = [s.glyphSet() for s in self.suffix] builder.add_chain_context_pos( self.location, prefix, glyphs, suffix, self.lookups ) def asFea(self, indent=""): res = "pos " if ( len(self.prefix) or len(self.suffix) or any([x is not None for x in self.lookups]) ): if len(self.prefix): res += " ".join(g.asFea() for g in self.prefix) + " " for i, g in enumerate(self.glyphs): res += g.asFea() + "'" if self.lookups[i]: for lu in self.lookups[i]: res += " lookup " + lu.name if i < len(self.glyphs) - 1: res += " " if len(self.suffix): res += " " + " ".join(map(asFea, self.suffix)) else: res += " ".join(map(asFea, self.glyphs)) res += ";" return res class ChainContextSubstStatement(Statement): r"""A chained contextual substitution statement. ``prefix``, ``glyphs``, and ``suffix`` should be lists of `glyph-containing objects`_ . ``lookups`` should be a list of elements representing what lookups to apply at each glyph position. Each element should be a :class:`LookupBlock` to apply a single chaining lookup at the given position, a list of :class:`LookupBlock`\ s to apply multiple lookups, or ``None`` to apply no lookup. The length of the outer list should equal the length of ``glyphs``; the inner lists can be of variable length.""" def __init__(self, prefix, glyphs, suffix, lookups, location=None): Statement.__init__(self, location) self.prefix, self.glyphs, self.suffix = prefix, glyphs, suffix self.lookups = list(lookups) for i, lookup in enumerate(lookups): if lookup: try: (_ for _ in lookup) except TypeError: self.lookups[i] = [lookup] def build(self, builder): """Calls the builder's ``add_chain_context_subst`` callback.""" prefix = [p.glyphSet() for p in self.prefix] glyphs = [g.glyphSet() for g in self.glyphs] suffix = [s.glyphSet() for s in self.suffix] builder.add_chain_context_subst( self.location, prefix, glyphs, suffix, self.lookups ) def asFea(self, indent=""): res = "sub " if ( len(self.prefix) or len(self.suffix) or any([x is not None for x in self.lookups]) ): if len(self.prefix): res += " ".join(g.asFea() for g in self.prefix) + " " for i, g in enumerate(self.glyphs): res += g.asFea() + "'" if self.lookups[i]: for lu in self.lookups[i]: res += " lookup " + lu.name if i < len(self.glyphs) - 1: res += " " if len(self.suffix): res += " " + " ".join(map(asFea, self.suffix)) else: res += " ".join(map(asFea, self.glyphs)) res += ";" return res class CursivePosStatement(Statement): """A cursive positioning statement. 
Entry and exit anchors can either be :class:`Anchor` objects or ``None``.""" def __init__(self, glyphclass, entryAnchor, exitAnchor, location=None): Statement.__init__(self, location) self.glyphclass = glyphclass self.entryAnchor, self.exitAnchor = entryAnchor, exitAnchor def build(self, builder): """Calls the builder object's ``add_cursive_pos`` callback.""" builder.add_cursive_pos( self.location, self.glyphclass.glyphSet(), self.entryAnchor, self.exitAnchor ) def asFea(self, indent=""): entry = self.entryAnchor.asFea() if self.entryAnchor else "<anchor NULL>" exit = self.exitAnchor.asFea() if self.exitAnchor else "<anchor NULL>" return "pos cursive {} {} {};".format(self.glyphclass.asFea(), entry, exit) class FeatureReferenceStatement(Statement): """Example: ``feature salt;``""" def __init__(self, featureName, location=None): Statement.__init__(self, location) self.location, self.featureName = (location, featureName) def build(self, builder): """Calls the builder object's ``add_feature_reference`` callback.""" builder.add_feature_reference(self.location, self.featureName) def asFea(self, indent=""): return "feature {};".format(self.featureName) class IgnorePosStatement(Statement): """An ``ignore pos`` statement, containing `one or more` contexts to ignore. ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples, with each of ``prefix``, ``glyphs`` and ``suffix`` being `glyph-containing objects`_ .""" def __init__(self, chainContexts, location=None): Statement.__init__(self, location) self.chainContexts = chainContexts def build(self, builder): """Calls the builder object's ``add_chain_context_pos`` callback on each rule context.""" for prefix, glyphs, suffix in self.chainContexts: prefix = [p.glyphSet() for p in prefix] glyphs = [g.glyphSet() for g in glyphs] suffix = [s.glyphSet() for s in suffix] builder.add_chain_context_pos(self.location, prefix, glyphs, suffix, []) def asFea(self, indent=""): contexts = [] for prefix, glyphs, suffix in self.chainContexts: res = "" if len(prefix) or len(suffix): if len(prefix): res += " ".join(map(asFea, prefix)) + " " res += " ".join(g.asFea() + "'" for g in glyphs) if len(suffix): res += " " + " ".join(map(asFea, suffix)) else: res += " ".join(map(asFea, glyphs)) contexts.append(res) return "ignore pos " + ", ".join(contexts) + ";" class IgnoreSubstStatement(Statement): """An ``ignore sub`` statement, containing `one or more` contexts to ignore. 
``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples, with each of ``prefix``, ``glyphs`` and ``suffix`` being `glyph-containing objects`_ .""" def __init__(self, chainContexts, location=None): Statement.__init__(self, location) self.chainContexts = chainContexts def build(self, builder): """Calls the builder object's ``add_chain_context_subst`` callback on each rule context.""" for prefix, glyphs, suffix in self.chainContexts: prefix = [p.glyphSet() for p in prefix] glyphs = [g.glyphSet() for g in glyphs] suffix = [s.glyphSet() for s in suffix] builder.add_chain_context_subst(self.location, prefix, glyphs, suffix, []) def asFea(self, indent=""): contexts = [] for prefix, glyphs, suffix in self.chainContexts: res = "" if len(prefix): res += " ".join(map(asFea, prefix)) + " " res += " ".join(g.asFea() + "'" for g in glyphs) if len(suffix): res += " " + " ".join(map(asFea, suffix)) contexts.append(res) return "ignore sub " + ", ".join(contexts) + ";" class IncludeStatement(Statement): """An ``include()`` statement.""" def __init__(self, filename, location=None): super(IncludeStatement, self).__init__(location) self.filename = filename #: String containing name of file to include def build(self): # TODO: consider lazy-loading the including parser/lexer? raise FeatureLibError( "Building an include statement is not implemented yet. " "Instead, use Parser(..., followIncludes=True) for building.", self.location, ) def asFea(self, indent=""): return indent + "include(%s);" % self.filename class LanguageStatement(Statement): """A ``language`` statement within a feature.""" def __init__(self, language, include_default=True, required=False, location=None): Statement.__init__(self, location) assert len(language) == 4 self.language = language #: A four-character language tag self.include_default = include_default #: If false, "exclude_dflt" self.required = required def build(self, builder): """Call the builder object's ``set_language`` callback.""" builder.set_language( location=self.location, language=self.language, include_default=self.include_default, required=self.required, ) def asFea(self, indent=""): res = "language {}".format(self.language.strip()) if not self.include_default: res += " exclude_dflt" if self.required: res += " required" res += ";" return res class LanguageSystemStatement(Statement): """A top-level ``languagesystem`` statement.""" def __init__(self, script, language, location=None): Statement.__init__(self, location) self.script, self.language = (script, language) def build(self, builder): """Calls the builder object's ``add_language_system`` callback.""" builder.add_language_system(self.location, self.script, self.language) def asFea(self, indent=""): return "languagesystem {} {};".format(self.script, self.language.strip()) class FontRevisionStatement(Statement): """A ``head`` table ``FontRevision`` statement. ``revision`` should be a number, and will be formatted to three significant decimal places.""" def __init__(self, revision, location=None): Statement.__init__(self, location) self.revision = revision def build(self, builder): builder.set_font_revision(self.location, self.revision) def asFea(self, indent=""): return "FontRevision {:.3f};".format(self.revision) class LigatureCaretByIndexStatement(Statement): """A ``GDEF`` table ``LigatureCaretByIndex`` statement. 
``glyphs`` should be a `glyph-containing object`_, and ``carets`` should be a list of integers.""" def __init__(self, glyphs, carets, location=None): Statement.__init__(self, location) self.glyphs, self.carets = (glyphs, carets) def build(self, builder): """Calls the builder object's ``add_ligatureCaretByIndex_`` callback.""" glyphs = self.glyphs.glyphSet() builder.add_ligatureCaretByIndex_(self.location, glyphs, set(self.carets)) def asFea(self, indent=""): return "LigatureCaretByIndex {} {};".format( self.glyphs.asFea(), " ".join(str(x) for x in self.carets) ) class LigatureCaretByPosStatement(Statement): """A ``GDEF`` table ``LigatureCaretByPos`` statement. ``glyphs`` should be a `glyph-containing object`_, and ``carets`` should be a list of integers.""" def __init__(self, glyphs, carets, location=None): Statement.__init__(self, location) self.glyphs, self.carets = (glyphs, carets) def build(self, builder): """Calls the builder object's ``add_ligatureCaretByPos_`` callback.""" glyphs = self.glyphs.glyphSet() builder.add_ligatureCaretByPos_(self.location, glyphs, set(self.carets)) def asFea(self, indent=""): return "LigatureCaretByPos {} {};".format( self.glyphs.asFea(), " ".join(str(x) for x in self.carets) ) class LigatureSubstStatement(Statement): """A chained contextual substitution statement. ``prefix``, ``glyphs``, and ``suffix`` should be lists of `glyph-containing objects`_; ``replacement`` should be a single `glyph-containing object`_. If ``forceChain`` is True, this is expressed as a chaining rule (e.g. ``sub f' i' by f_i``) even when no context is given.""" def __init__(self, prefix, glyphs, suffix, replacement, forceChain, location=None): Statement.__init__(self, location) self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) self.replacement, self.forceChain = replacement, forceChain def build(self, builder): prefix = [p.glyphSet() for p in self.prefix] glyphs = [g.glyphSet() for g in self.glyphs] suffix = [s.glyphSet() for s in self.suffix] builder.add_ligature_subst( self.location, prefix, glyphs, suffix, self.replacement, self.forceChain ) def asFea(self, indent=""): res = "sub " if len(self.prefix) or len(self.suffix) or self.forceChain: if len(self.prefix): res += " ".join(g.asFea() for g in self.prefix) + " " res += " ".join(g.asFea() + "'" for g in self.glyphs) if len(self.suffix): res += " " + " ".join(g.asFea() for g in self.suffix) else: res += " ".join(g.asFea() for g in self.glyphs) res += " by " res += asFea(self.replacement) res += ";" return res class LookupFlagStatement(Statement): """A ``lookupflag`` statement. 
The ``value`` should be an integer value representing the flags in use, but not including the ``markAttachment`` class and ``markFilteringSet`` values, which must be specified as glyph-containing objects.""" def __init__( self, value=0, markAttachment=None, markFilteringSet=None, location=None ): Statement.__init__(self, location) self.value = value self.markAttachment = markAttachment self.markFilteringSet = markFilteringSet def build(self, builder): """Calls the builder object's ``set_lookup_flag`` callback.""" markAttach = None if self.markAttachment is not None: markAttach = self.markAttachment.glyphSet() markFilter = None if self.markFilteringSet is not None: markFilter = self.markFilteringSet.glyphSet() builder.set_lookup_flag(self.location, self.value, markAttach, markFilter) def asFea(self, indent=""): res = [] flags = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"] curr = 1 for i in range(len(flags)): if self.value & curr != 0: res.append(flags[i]) curr = curr << 1 if self.markAttachment is not None: res.append("MarkAttachmentType {}".format(self.markAttachment.asFea())) if self.markFilteringSet is not None: res.append("UseMarkFilteringSet {}".format(self.markFilteringSet.asFea())) if not res: res = ["0"] return "lookupflag {};".format(" ".join(res)) class LookupReferenceStatement(Statement): """Represents a ``lookup ...;`` statement to include a lookup in a feature. The ``lookup`` should be a :class:`LookupBlock` object.""" def __init__(self, lookup, location=None): Statement.__init__(self, location) self.location, self.lookup = (location, lookup) def build(self, builder): """Calls the builder object's ``add_lookup_call`` callback.""" builder.add_lookup_call(self.lookup.name) def asFea(self, indent=""): return "lookup {};".format(self.lookup.name) class MarkBasePosStatement(Statement): """A mark-to-base positioning rule. The ``base`` should be a `glyph-containing object`_. The ``marks`` should be a list of (:class:`Anchor`, :class:`MarkClass`) tuples.""" def __init__(self, base, marks, location=None): Statement.__init__(self, location) self.base, self.marks = base, marks def build(self, builder): """Calls the builder object's ``add_mark_base_pos`` callback.""" builder.add_mark_base_pos(self.location, self.base.glyphSet(), self.marks) def asFea(self, indent=""): res = "pos base {}".format(self.base.asFea()) for a, m in self.marks: res += "\n" + indent + SHIFT + "{} mark @{}".format(a.asFea(), m.name) res += ";" return res class MarkLigPosStatement(Statement): """A mark-to-ligature positioning rule. The ``ligatures`` must be a `glyph-containing object`_. The ``marks`` should be a list of lists: each element in the top-level list represents a component glyph, and is made up of a list of (:class:`Anchor`, :class:`MarkClass`) tuples representing mark attachment points for that position. Example:: m1 = MarkClass("TOP_MARKS") m2 = MarkClass("BOTTOM_MARKS") # ... add definitions to mark classes... 
glyph = GlyphName("lam_meem_jeem") marks = [ [ (Anchor(625,1800), m1) ], # Attachments on 1st component (lam) [ (Anchor(376,-378), m2) ], # Attachments on 2nd component (meem) [ ] # No attachments on the jeem ] mlp = MarkLigPosStatement(glyph, marks) mlp.asFea() # pos ligature lam_meem_jeem <anchor 625 1800> mark @TOP_MARKS # ligComponent <anchor 376 -378> mark @BOTTOM_MARKS; """ def __init__(self, ligatures, marks, location=None): Statement.__init__(self, location) self.ligatures, self.marks = ligatures, marks def build(self, builder): """Calls the builder object's ``add_mark_lig_pos`` callback.""" builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks) def asFea(self, indent=""): res = "pos ligature {}".format(self.ligatures.asFea()) ligs = [] for l in self.marks: temp = "" if l is None or not len(l): temp = "\n" + indent + SHIFT * 2 + "<anchor NULL>" else: for a, m in l: temp += ( "\n" + indent + SHIFT * 2 + "{} mark @{}".format(a.asFea(), m.name) ) ligs.append(temp) res += ("\n" + indent + SHIFT + "ligComponent").join(ligs) res += ";" return res class MarkMarkPosStatement(Statement): """A mark-to-mark positioning rule. The ``baseMarks`` must be a `glyph-containing object`_. The ``marks`` should be a list of (:class:`Anchor`, :class:`MarkClass`) tuples.""" def __init__(self, baseMarks, marks, location=None): Statement.__init__(self, location) self.baseMarks, self.marks = baseMarks, marks def build(self, builder): """Calls the builder object's ``add_mark_mark_pos`` callback.""" builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks) def asFea(self, indent=""): res = "pos mark {}".format(self.baseMarks.asFea()) for a, m in self.marks: res += "\n" + indent + SHIFT + "{} mark @{}".format(a.asFea(), m.name) res += ";" return res class MultipleSubstStatement(Statement): """A multiple substitution statement. Args: prefix: a list of `glyph-containing objects`_. glyph: a single glyph-containing object. suffix: a list of glyph-containing objects. replacement: a list of glyph-containing objects. forceChain: If true, the statement is expressed as a chaining rule (e.g. ``sub f' i' by f_i``) even when no context is given. 
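    Example (an illustrative sketch only; the glyph names are invented, and
    ``GlyphName`` is the glyph-containing class defined in this module)::

        glyph = GlyphName("f_i")
        replacement = [GlyphName("f"), GlyphName("i")]
        sub = MultipleSubstStatement([], glyph, [], replacement)
        sub.asFea()  # 'sub f_i by f i;'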
""" def __init__( self, prefix, glyph, suffix, replacement, forceChain=False, location=None ): Statement.__init__(self, location) self.prefix, self.glyph, self.suffix = prefix, glyph, suffix self.replacement = replacement self.forceChain = forceChain def build(self, builder): """Calls the builder object's ``add_multiple_subst`` callback.""" prefix = [p.glyphSet() for p in self.prefix] suffix = [s.glyphSet() for s in self.suffix] if hasattr(self.glyph, "glyphSet"): originals = self.glyph.glyphSet() else: originals = [self.glyph] count = len(originals) replaces = [] for r in self.replacement: if hasattr(r, "glyphSet"): replace = r.glyphSet() else: replace = [r] if len(replace) == 1 and len(replace) != count: replace = replace * count replaces.append(replace) replaces = list(zip(*replaces)) seen_originals = set() for i, original in enumerate(originals): if original not in seen_originals: seen_originals.add(original) builder.add_multiple_subst( self.location, prefix, original, suffix, replaces and replaces[i] or (), self.forceChain, ) def asFea(self, indent=""): res = "sub " if len(self.prefix) or len(self.suffix) or self.forceChain: if len(self.prefix): res += " ".join(map(asFea, self.prefix)) + " " res += asFea(self.glyph) + "'" if len(self.suffix): res += " " + " ".join(map(asFea, self.suffix)) else: res += asFea(self.glyph) replacement = self.replacement or [NullGlyph()] res += " by " res += " ".join(map(asFea, replacement)) res += ";" return res class PairPosStatement(Statement): """A pair positioning statement. ``glyphs1`` and ``glyphs2`` should be `glyph-containing objects`_. ``valuerecord1`` should be a :class:`ValueRecord` object; ``valuerecord2`` should be either a :class:`ValueRecord` object or ``None``. If ``enumerated`` is true, then this is expressed as an `enumerated pair <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#6.b.ii>`_. """ def __init__( self, glyphs1, valuerecord1, glyphs2, valuerecord2, enumerated=False, location=None, ): Statement.__init__(self, location) self.enumerated = enumerated self.glyphs1, self.valuerecord1 = glyphs1, valuerecord1 self.glyphs2, self.valuerecord2 = glyphs2, valuerecord2 def build(self, builder): """Calls a callback on the builder object: * If the rule is enumerated, calls ``add_specific_pair_pos`` on each combination of first and second glyphs. * If the glyphs are both single :class:`GlyphName` objects, calls ``add_specific_pair_pos``. * Else, calls ``add_class_pair_pos``. 
""" if self.enumerated: g = [self.glyphs1.glyphSet(), self.glyphs2.glyphSet()] seen_pair = False for glyph1, glyph2 in itertools.product(*g): seen_pair = True builder.add_specific_pair_pos( self.location, glyph1, self.valuerecord1, glyph2, self.valuerecord2 ) if not seen_pair: raise FeatureLibError( "Empty glyph class in positioning rule", self.location ) return is_specific = isinstance(self.glyphs1, GlyphName) and isinstance( self.glyphs2, GlyphName ) if is_specific: builder.add_specific_pair_pos( self.location, self.glyphs1.glyph, self.valuerecord1, self.glyphs2.glyph, self.valuerecord2, ) else: builder.add_class_pair_pos( self.location, self.glyphs1.glyphSet(), self.valuerecord1, self.glyphs2.glyphSet(), self.valuerecord2, ) def asFea(self, indent=""): res = "enum " if self.enumerated else "" if self.valuerecord2: res += "pos {} {} {} {};".format( self.glyphs1.asFea(), self.valuerecord1.asFea(), self.glyphs2.asFea(), self.valuerecord2.asFea(), ) else: res += "pos {} {} {};".format( self.glyphs1.asFea(), self.glyphs2.asFea(), self.valuerecord1.asFea() ) return res class ReverseChainSingleSubstStatement(Statement): """A reverse chaining substitution statement. You don't see those every day. Note the unusual argument order: ``suffix`` comes `before` ``glyphs``. ``old_prefix``, ``old_suffix``, ``glyphs`` and ``replacements`` should be lists of `glyph-containing objects`_. ``glyphs`` and ``replacements`` should be one-item lists. """ def __init__(self, old_prefix, old_suffix, glyphs, replacements, location=None): Statement.__init__(self, location) self.old_prefix, self.old_suffix = old_prefix, old_suffix self.glyphs = glyphs self.replacements = replacements def build(self, builder): prefix = [p.glyphSet() for p in self.old_prefix] suffix = [s.glyphSet() for s in self.old_suffix] originals = self.glyphs[0].glyphSet() replaces = self.replacements[0].glyphSet() if len(replaces) == 1: replaces = replaces * len(originals) builder.add_reverse_chain_single_subst( self.location, prefix, suffix, dict(zip(originals, replaces)) ) def asFea(self, indent=""): res = "rsub " if len(self.old_prefix) or len(self.old_suffix): if len(self.old_prefix): res += " ".join(asFea(g) for g in self.old_prefix) + " " res += " ".join(asFea(g) + "'" for g in self.glyphs) if len(self.old_suffix): res += " " + " ".join(asFea(g) for g in self.old_suffix) else: res += " ".join(map(asFea, self.glyphs)) res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) return res class SingleSubstStatement(Statement): """A single substitution statement. Note the unusual argument order: ``prefix`` and suffix come `after` the replacement ``glyphs``. ``prefix``, ``suffix``, ``glyphs`` and ``replace`` should be lists of `glyph-containing objects`_. ``glyphs`` and ``replace`` should be one-item lists. 
""" def __init__(self, glyphs, replace, prefix, suffix, forceChain, location=None): Statement.__init__(self, location) self.prefix, self.suffix = prefix, suffix self.forceChain = forceChain self.glyphs = glyphs self.replacements = replace def build(self, builder): """Calls the builder object's ``add_single_subst`` callback.""" prefix = [p.glyphSet() for p in self.prefix] suffix = [s.glyphSet() for s in self.suffix] originals = self.glyphs[0].glyphSet() replaces = self.replacements[0].glyphSet() if len(replaces) == 1: replaces = replaces * len(originals) builder.add_single_subst( self.location, prefix, suffix, OrderedDict(zip(originals, replaces)), self.forceChain, ) def asFea(self, indent=""): res = "sub " if len(self.prefix) or len(self.suffix) or self.forceChain: if len(self.prefix): res += " ".join(asFea(g) for g in self.prefix) + " " res += " ".join(asFea(g) + "'" for g in self.glyphs) if len(self.suffix): res += " " + " ".join(asFea(g) for g in self.suffix) else: res += " ".join(asFea(g) for g in self.glyphs) res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) return res class ScriptStatement(Statement): """A ``script`` statement.""" def __init__(self, script, location=None): Statement.__init__(self, location) self.script = script #: the script code def build(self, builder): """Calls the builder's ``set_script`` callback.""" builder.set_script(self.location, self.script) def asFea(self, indent=""): return "script {};".format(self.script.strip()) class SinglePosStatement(Statement): """A single position statement. ``prefix`` and ``suffix`` should be lists of `glyph-containing objects`_. ``pos`` should be a one-element list containing a (`glyph-containing object`_, :class:`ValueRecord`) tuple.""" def __init__(self, pos, prefix, suffix, forceChain, location=None): Statement.__init__(self, location) self.pos, self.prefix, self.suffix = pos, prefix, suffix self.forceChain = forceChain def build(self, builder): """Calls the builder object's ``add_single_pos`` callback.""" prefix = [p.glyphSet() for p in self.prefix] suffix = [s.glyphSet() for s in self.suffix] pos = [(g.glyphSet(), value) for g, value in self.pos] builder.add_single_pos(self.location, prefix, suffix, pos, self.forceChain) def asFea(self, indent=""): res = "pos " if len(self.prefix) or len(self.suffix) or self.forceChain: if len(self.prefix): res += " ".join(map(asFea, self.prefix)) + " " res += " ".join( [ asFea(x[0]) + "'" + ((" " + x[1].asFea()) if x[1] else "") for x in self.pos ] ) if len(self.suffix): res += " " + " ".join(map(asFea, self.suffix)) else: res += " ".join( [asFea(x[0]) + " " + (x[1].asFea() if x[1] else "") for x in self.pos] ) res += ";" return res class SubtableStatement(Statement): """Represents a subtable break.""" def __init__(self, location=None): Statement.__init__(self, location) def build(self, builder): """Calls the builder objects's ``add_subtable_break`` callback.""" builder.add_subtable_break(self.location) def asFea(self, indent=""): return "subtable;" class ValueRecord(Expression): """Represents a value record.""" def __init__( self, xPlacement=None, yPlacement=None, xAdvance=None, yAdvance=None, xPlaDevice=None, yPlaDevice=None, xAdvDevice=None, yAdvDevice=None, vertical=False, location=None, ): Expression.__init__(self, location) self.xPlacement, self.yPlacement = (xPlacement, yPlacement) self.xAdvance, self.yAdvance = (xAdvance, yAdvance) self.xPlaDevice, self.yPlaDevice = (xPlaDevice, yPlaDevice) self.xAdvDevice, self.yAdvDevice = (xAdvDevice, yAdvDevice) 
self.vertical = vertical def __eq__(self, other): return ( self.xPlacement == other.xPlacement and self.yPlacement == other.yPlacement and self.xAdvance == other.xAdvance and self.yAdvance == other.yAdvance and self.xPlaDevice == other.xPlaDevice and self.yPlaDevice == other.yPlaDevice and self.xAdvDevice == other.xAdvDevice and self.yAdvDevice == other.yAdvDevice ) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return ( hash(self.xPlacement) ^ hash(self.yPlacement) ^ hash(self.xAdvance) ^ hash(self.yAdvance) ^ hash(self.xPlaDevice) ^ hash(self.yPlaDevice) ^ hash(self.xAdvDevice) ^ hash(self.yAdvDevice) ) def asFea(self, indent=""): if not self: return "<NULL>" x, y = self.xPlacement, self.yPlacement xAdvance, yAdvance = self.xAdvance, self.yAdvance xPlaDevice, yPlaDevice = self.xPlaDevice, self.yPlaDevice xAdvDevice, yAdvDevice = self.xAdvDevice, self.yAdvDevice vertical = self.vertical # Try format A, if possible. if x is None and y is None: if xAdvance is None and vertical: return str(yAdvance) elif yAdvance is None and not vertical: return str(xAdvance) # Make any remaining None value 0 to avoid generating invalid records. x = x or 0 y = y or 0 xAdvance = xAdvance or 0 yAdvance = yAdvance or 0 # Try format B, if possible. if ( xPlaDevice is None and yPlaDevice is None and xAdvDevice is None and yAdvDevice is None ): return "<%s %s %s %s>" % (x, y, xAdvance, yAdvance) # Last resort is format C. return "<%s %s %s %s %s %s %s %s>" % ( x, y, xAdvance, yAdvance, deviceToString(xPlaDevice), deviceToString(yPlaDevice), deviceToString(xAdvDevice), deviceToString(yAdvDevice), ) def __bool__(self): return any( getattr(self, v) is not None for v in [ "xPlacement", "yPlacement", "xAdvance", "yAdvance", "xPlaDevice", "yPlaDevice", "xAdvDevice", "yAdvDevice", ] ) __nonzero__ = __bool__ class ValueRecordDefinition(Statement): """Represents a named value record definition.""" def __init__(self, name, value, location=None): Statement.__init__(self, location) self.name = name #: Value record name as string self.value = value #: :class:`ValueRecord` object def asFea(self, indent=""): return "valueRecordDef {} {};".format(self.value.asFea(), self.name) def simplify_name_attributes(pid, eid, lid): if pid == 3 and eid == 1 and lid == 1033: return "" elif pid == 1 and eid == 0 and lid == 0: return "1" else: return "{} {} {}".format(pid, eid, lid) class NameRecord(Statement): """Represents a name record. (`Section 9.e. <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.e>`_)""" def __init__(self, nameID, platformID, platEncID, langID, string, location=None): Statement.__init__(self, location) self.nameID = nameID #: Name ID as integer (e.g.
9 for designer's name) self.platformID = platformID #: Platform ID as integer self.platEncID = platEncID #: Platform encoding ID as integer self.langID = langID #: Language ID as integer self.string = string #: Name record value def build(self, builder): """Calls the builder object's ``add_name_record`` callback.""" builder.add_name_record( self.location, self.nameID, self.platformID, self.platEncID, self.langID, self.string, ) def asFea(self, indent=""): def escape(c, escape_pattern): # Also escape U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS if c >= 0x20 and c <= 0x7E and c not in (0x22, 0x5C): return chr(c) else: return escape_pattern % c encoding = getEncoding(self.platformID, self.platEncID, self.langID) if encoding is None: raise FeatureLibError("Unsupported encoding", self.location) s = tobytes(self.string, encoding=encoding) if encoding == "utf_16_be": escaped_string = "".join( [ escape(byteord(s[i]) * 256 + byteord(s[i + 1]), r"\%04x") for i in range(0, len(s), 2) ] ) else: escaped_string = "".join([escape(byteord(b), r"\%02x") for b in s]) plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) if plat != "": plat += " " return 'nameid {} {}"{}";'.format(self.nameID, plat, escaped_string) class FeatureNameStatement(NameRecord): """Represents a ``sizemenuname`` or ``name`` statement.""" def build(self, builder): """Calls the builder object's ``add_featureName`` callback.""" NameRecord.build(self, builder) builder.add_featureName(self.nameID) def asFea(self, indent=""): if self.nameID == "size": tag = "sizemenuname" else: tag = "name" plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) if plat != "": plat += " " return '{} {}"{}";'.format(tag, plat, self.string) class STATNameStatement(NameRecord): """Represents a STAT table ``name`` statement.""" def asFea(self, indent=""): plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) if plat != "": plat += " " return 'name {}"{}";'.format(plat, self.string) class SizeParameters(Statement): """A ``parameters`` statement.""" def __init__(self, DesignSize, SubfamilyID, RangeStart, RangeEnd, location=None): Statement.__init__(self, location) self.DesignSize = DesignSize self.SubfamilyID = SubfamilyID self.RangeStart = RangeStart self.RangeEnd = RangeEnd def build(self, builder): """Calls the builder object's ``set_size_parameters`` callback.""" builder.set_size_parameters( self.location, self.DesignSize, self.SubfamilyID, self.RangeStart, self.RangeEnd, ) def asFea(self, indent=""): res = "parameters {:.1f} {}".format(self.DesignSize, self.SubfamilyID) if self.RangeStart != 0 or self.RangeEnd != 0: res += " {} {}".format(int(self.RangeStart * 10), int(self.RangeEnd * 10)) return res + ";" class CVParametersNameStatement(NameRecord): """Represent a name statement inside a ``cvParameters`` block.""" def __init__( self, nameID, platformID, platEncID, langID, string, block_name, location=None ): NameRecord.__init__( self, nameID, platformID, platEncID, langID, string, location=location ) self.block_name = block_name def build(self, builder): """Calls the builder object's ``add_cv_parameter`` callback.""" item = "" if self.block_name == "ParamUILabelNameID": item = "_{}".format(builder.cv_num_named_params_.get(self.nameID, 0)) builder.add_cv_parameter(self.nameID) self.nameID = (self.nameID, self.block_name + item) NameRecord.build(self, builder) def asFea(self, indent=""): plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) if plat != "": plat 
+= " " return 'name {}"{}";'.format(plat, self.string) class CharacterStatement(Statement): """ Statement used in cvParameters blocks of Character Variant features (cvXX). The Unicode value may be written with either decimal or hexadecimal notation. The value must be preceded by '0x' if it is a hexadecimal value. The largest Unicode value allowed is 0xFFFFFF. """ def __init__(self, character, tag, location=None): Statement.__init__(self, location) self.character = character self.tag = tag def build(self, builder): """Calls the builder object's ``add_cv_character`` callback.""" builder.add_cv_character(self.character, self.tag) def asFea(self, indent=""): return "Character {:#x};".format(self.character) class BaseAxis(Statement): """An axis definition, being either a ``VertAxis.BaseTagList/BaseScriptList`` pair or a ``HorizAxis.BaseTagList/BaseScriptList`` pair.""" def __init__(self, bases, scripts, vertical, location=None): Statement.__init__(self, location) self.bases = bases #: A list of baseline tag names as strings self.scripts = scripts #: A list of script record tuplets (script tag, default baseline tag, base coordinate) self.vertical = vertical #: Boolean; VertAxis if True, HorizAxis if False def build(self, builder): """Calls the builder object's ``set_base_axis`` callback.""" builder.set_base_axis(self.bases, self.scripts, self.vertical) def asFea(self, indent=""): direction = "Vert" if self.vertical else "Horiz" scripts = [ "{} {} {}".format(a[0], a[1], " ".join(map(str, a[2]))) for a in self.scripts ] return "{}Axis.BaseTagList {};\n{}{}Axis.BaseScriptList {};".format( direction, " ".join(self.bases), indent, direction, ", ".join(scripts) ) class OS2Field(Statement): """An entry in the ``OS/2`` table. Most ``values`` should be numbers or strings, apart from when the key is ``UnicodeRange``, ``CodePageRange`` or ``Panose``, in which case it should be an array of integers.""" def __init__(self, key, value, location=None): Statement.__init__(self, location) self.key = key self.value = value def build(self, builder): """Calls the builder object's ``add_os2_field`` callback.""" builder.add_os2_field(self.key, self.value) def asFea(self, indent=""): def intarr2str(x): return " ".join(map(str, x)) numbers = ( "FSType", "TypoAscender", "TypoDescender", "TypoLineGap", "winAscent", "winDescent", "XHeight", "CapHeight", "WeightClass", "WidthClass", "LowerOpSize", "UpperOpSize", ) ranges = ("UnicodeRange", "CodePageRange") keywords = dict([(x.lower(), [x, str]) for x in numbers]) keywords.update([(x.lower(), [x, intarr2str]) for x in ranges]) keywords["panose"] = ["Panose", intarr2str] keywords["vendor"] = ["Vendor", lambda y: '"{}"'.format(y)] if self.key in keywords: return "{} {};".format( keywords[self.key][0], keywords[self.key][1](self.value) ) return "" # should raise exception class HheaField(Statement): """An entry in the ``hhea`` table.""" def __init__(self, key, value, location=None): Statement.__init__(self, location) self.key = key self.value = value def build(self, builder): """Calls the builder object's ``add_hhea_field`` callback.""" builder.add_hhea_field(self.key, self.value) def asFea(self, indent=""): fields = ("CaretOffset", "Ascender", "Descender", "LineGap") keywords = dict([(x.lower(), x) for x in fields]) return "{} {};".format(keywords[self.key], self.value) class VheaField(Statement): """An entry in the ``vhea`` table.""" def __init__(self, key, value, location=None): Statement.__init__(self, location) self.key = key self.value = value def build(self, builder): 
"""Calls the builder object's ``add_vhea_field`` callback.""" builder.add_vhea_field(self.key, self.value) def asFea(self, indent=""): fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") keywords = dict([(x.lower(), x) for x in fields]) return "{} {};".format(keywords[self.key], self.value) class STATDesignAxisStatement(Statement): """A STAT table Design Axis Args: tag (str): a 4 letter axis tag axisOrder (int): an int names (list): a list of :class:`STATNameStatement` objects """ def __init__(self, tag, axisOrder, names, location=None): Statement.__init__(self, location) self.tag = tag self.axisOrder = axisOrder self.names = names self.location = location def build(self, builder): builder.addDesignAxis(self, self.location) def asFea(self, indent=""): indent += SHIFT res = f"DesignAxis {self.tag} {self.axisOrder} {{ \n" res += ("\n" + indent).join([s.asFea(indent=indent) for s in self.names]) + "\n" res += "};" return res class ElidedFallbackName(Statement): """STAT table ElidedFallbackName Args: names: a list of :class:`STATNameStatement` objects """ def __init__(self, names, location=None): Statement.__init__(self, location) self.names = names self.location = location def build(self, builder): builder.setElidedFallbackName(self.names, self.location) def asFea(self, indent=""): indent += SHIFT res = "ElidedFallbackName { \n" res += ("\n" + indent).join([s.asFea(indent=indent) for s in self.names]) + "\n" res += "};" return res class ElidedFallbackNameID(Statement): """STAT table ElidedFallbackNameID Args: value: an int pointing to an existing name table name ID """ def __init__(self, value, location=None): Statement.__init__(self, location) self.value = value self.location = location def build(self, builder): builder.setElidedFallbackName(self.value, self.location) def asFea(self, indent=""): return f"ElidedFallbackNameID {self.value};" class STATAxisValueStatement(Statement): """A STAT table Axis Value Record Args: names (list): a list of :class:`STATNameStatement` objects locations (list): a list of :class:`AxisValueLocationStatement` objects flags (int): an int """ def __init__(self, names, locations, flags, location=None): Statement.__init__(self, location) self.names = names self.locations = locations self.flags = flags def build(self, builder): builder.addAxisValueRecord(self, self.location) def asFea(self, indent=""): res = "AxisValue {\n" for location in self.locations: res += location.asFea() for nameRecord in self.names: res += nameRecord.asFea() res += "\n" if self.flags: flags = ["OlderSiblingFontAttribute", "ElidableAxisValueName"] flagStrings = [] curr = 1 for i in range(len(flags)): if self.flags & curr != 0: flagStrings.append(flags[i]) curr = curr << 1 res += f"flag {' '.join(flagStrings)};\n" res += "};" return res class AxisValueLocationStatement(Statement): """ A STAT table Axis Value Location Args: tag (str): a 4 letter axis tag values (list): a list of ints and/or floats """ def __init__(self, tag, values, location=None): Statement.__init__(self, location) self.tag = tag self.values = values def asFea(self, res=""): res += f"location {self.tag} " res += f"{' '.join(str(i) for i in self.values)};\n" return res class ConditionsetStatement(Statement): """ A variable layout conditionset Args: name (str): the name of this conditionset conditions (dict): a dictionary mapping axis tags to a tuple of (min,max) userspace coordinates. 
""" def __init__(self, name, conditions, location=None): Statement.__init__(self, location) self.name = name self.conditions = conditions def build(self, builder): builder.add_conditionset(self.location, self.name, self.conditions) def asFea(self, res="", indent=""): res += indent + f"conditionset {self.name} " + "{\n" for tag, (minvalue, maxvalue) in self.conditions.items(): res += indent + SHIFT + f"{tag} {minvalue} {maxvalue};\n" res += indent + "}" + f" {self.name};\n" return res class VariationBlock(Block): """A variation feature block, applicable in a given set of conditions.""" def __init__(self, name, conditionset, use_extension=False, location=None): Block.__init__(self, location) self.name, self.conditionset, self.use_extension = ( name, conditionset, use_extension, ) def build(self, builder): """Call the ``start_feature`` callback on the builder object, visit all the statements in this feature, and then call ``end_feature``.""" builder.start_feature(self.location, self.name) if ( self.conditionset != "NULL" and self.conditionset not in builder.conditionsets_ ): raise FeatureLibError( f"variation block used undefined conditionset {self.conditionset}", self.location, ) # language exclude_dflt statements modify builder.features_ # limit them to this block with temporary builder.features_ features = builder.features_ builder.features_ = {} Block.build(self, builder) for key, value in builder.features_.items(): items = builder.feature_variations_.setdefault(key, {}).setdefault( self.conditionset, [] ) items.extend(value) if key not in features: features[key] = [] # Ensure we make a feature record builder.features_ = features builder.end_feature() def asFea(self, indent=""): res = indent + "variation %s " % self.name.strip() res += self.conditionset + " " if self.use_extension: res += "useExtension " res += "{\n" res += Block.asFea(self, indent=indent) res += indent + "} %s;\n" % self.name.strip() return res PKaZZZS��!zzfontTools/feaLib/builder.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import Tag, tostr, binary2num, safeEval from fontTools.feaLib.error import FeatureLibError from fontTools.feaLib.lookupDebugInfo import ( LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY, LOOKUP_DEBUG_ENV_VAR, ) from fontTools.feaLib.parser import Parser from fontTools.feaLib.ast import FeatureFile from fontTools.feaLib.variableScalar import VariableScalar from fontTools.otlLib import builder as otl from fontTools.otlLib.maxContextCalc import maxCtxFont from fontTools.ttLib import newTable, getTableModule from fontTools.ttLib.tables import otBase, otTables from fontTools.otlLib.builder import ( AlternateSubstBuilder, ChainContextPosBuilder, ChainContextSubstBuilder, LigatureSubstBuilder, MultipleSubstBuilder, CursivePosBuilder, MarkBasePosBuilder, MarkLigPosBuilder, MarkMarkPosBuilder, ReverseChainSingleSubstBuilder, SingleSubstBuilder, ClassPairPosSubtableBuilder, PairPosBuilder, SinglePosBuilder, ChainContextualRule, ) from fontTools.otlLib.error import OpenTypeLibError from fontTools.varLib.varStore import OnlineVarStoreBuilder from fontTools.varLib.builder import buildVarDevTable from fontTools.varLib.featureVars import addFeatureVariationsRaw from fontTools.varLib.models import normalizeValue, piecewiseLinearMap from collections import defaultdict import copy import itertools from io import StringIO import logging import warnings import os log = logging.getLogger(__name__) def addOpenTypeFeatures(font, featurefile, tables=None, debug=False): """Add features from a file to a font. 
Note that this replaces any features currently present. Args: font (fontTools.ttLib.TTFont): The font object. featurefile: Either a path or file object (in which case we parse it into an AST), or a pre-parsed AST instance. tables: If passed, restrict the set of affected tables to those in the list. debug: Whether to add source debugging information to the font in the ``Debg`` table. """ builder = Builder(font, featurefile) builder.build(tables=tables, debug=debug) def addOpenTypeFeaturesFromString( font, features, filename=None, tables=None, debug=False ): """Add features from a string to a font. Note that this replaces any features currently present. Args: font (fontTools.ttLib.TTFont): The font object. features: A string containing feature code. filename: The directory containing ``filename`` is used as the root of relative ``include()`` paths; if ``None`` is provided, the current directory is assumed. tables: If passed, restrict the set of affected tables to those in the list. debug: Whether to add source debugging information to the font in the ``Debg`` table. """ featurefile = StringIO(tostr(features)) if filename: featurefile.name = filename addOpenTypeFeatures(font, featurefile, tables=tables, debug=debug) class Builder(object): supportedTables = frozenset( Tag(tag) for tag in [ "BASE", "GDEF", "GPOS", "GSUB", "OS/2", "head", "hhea", "name", "vhea", "STAT", ] ) def __init__(self, font, featurefile): self.font = font # 'featurefile' can be either a path or file object (in which case we # parse it into an AST), or a pre-parsed AST instance if isinstance(featurefile, FeatureFile): self.parseTree, self.file = featurefile, None else: self.parseTree, self.file = None, featurefile self.glyphMap = font.getReverseGlyphMap() self.varstorebuilder = None if "fvar" in font: self.axes = font["fvar"].axes self.varstorebuilder = OnlineVarStoreBuilder( [ax.axisTag for ax in self.axes] ) self.default_language_systems_ = set() self.script_ = None self.lookupflag_ = 0 self.lookupflag_markFilterSet_ = None self.language_systems = set() self.seen_non_DFLT_script_ = False self.named_lookups_ = {} self.cur_lookup_ = None self.cur_lookup_name_ = None self.cur_feature_name_ = None self.lookups_ = [] self.lookup_locations = {"GSUB": {}, "GPOS": {}} self.features_ = {} # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*] self.required_features_ = {} # ('latn', 'DEU ') --> 'smcp' self.feature_variations_ = {} # for feature 'aalt' self.aalt_features_ = [] # [(location, featureName)*], for 'aalt' self.aalt_location_ = None self.aalt_alternates_ = {} # for 'featureNames' self.featureNames_ = set() self.featureNames_ids_ = {} # for 'cvParameters' self.cv_parameters_ = set() self.cv_parameters_ids_ = {} self.cv_num_named_params_ = {} self.cv_characters_ = defaultdict(list) # for feature 'size' self.size_parameters_ = None # for table 'head' self.fontRevision_ = None # 2.71 # for table 'name' self.names_ = [] # for table 'BASE' self.base_horiz_axis_ = None self.base_vert_axis_ = None # for table 'GDEF' self.attachPoints_ = {} # "a" --> {3, 7} self.ligCaretCoords_ = {} # "f_f_i" --> {300, 600} self.ligCaretPoints_ = {} # "f_f_i" --> {3, 7} self.glyphClassDefs_ = {} # "fi" --> (2, (file, line, column)) self.markAttach_ = {} # "acute" --> (4, (file, line, column)) self.markAttachClassID_ = {} # frozenset({"acute", "grave"}) --> 4 self.markFilterSets_ = {} # frozenset({"acute", "grave"}) --> 4 # for table 'OS/2' self.os2_ = {} # for table 'hhea' self.hhea_ = {} # for table 'vhea' self.vhea_ = {} # for table 'STAT' self.stat_ = {} #
for conditionsets self.conditionsets_ = {} # We will often use exactly the same locations (i.e. the font's masters) # for a large number of variable scalars. Instead of creating a model # for each, let's share the models. self.model_cache = {} def build(self, tables=None, debug=False): if self.parseTree is None: self.parseTree = Parser(self.file, self.glyphMap).parse() self.parseTree.build(self) # by default, build all the supported tables if tables is None: tables = self.supportedTables else: tables = frozenset(tables) unsupported = tables - self.supportedTables if unsupported: unsupported_string = ", ".join(sorted(unsupported)) raise NotImplementedError( "The following tables were requested but are unsupported: " f"{unsupported_string}." ) if "GSUB" in tables: self.build_feature_aalt_() if "head" in tables: self.build_head() if "hhea" in tables: self.build_hhea() if "vhea" in tables: self.build_vhea() if "name" in tables: self.build_name() if "OS/2" in tables: self.build_OS_2() if "STAT" in tables: self.build_STAT() for tag in ("GPOS", "GSUB"): if tag not in tables: continue table = self.makeTable(tag) if self.feature_variations_: self.makeFeatureVariations(table, tag) if ( table.ScriptList.ScriptCount > 0 or table.FeatureList.FeatureCount > 0 or table.LookupList.LookupCount > 0 ): fontTable = self.font[tag] = newTable(tag) fontTable.table = table elif tag in self.font: del self.font[tag] if any(tag in self.font for tag in ("GPOS", "GSUB")) and "OS/2" in self.font: self.font["OS/2"].usMaxContext = maxCtxFont(self.font) if "GDEF" in tables: gdef = self.buildGDEF() if gdef: self.font["GDEF"] = gdef elif "GDEF" in self.font: del self.font["GDEF"] if "BASE" in tables: base = self.buildBASE() if base: self.font["BASE"] = base elif "BASE" in self.font: del self.font["BASE"] if debug or os.environ.get(LOOKUP_DEBUG_ENV_VAR): self.buildDebg() def get_chained_lookup_(self, location, builder_class): result = builder_class(self.font, location) result.lookupflag = self.lookupflag_ result.markFilterSet = self.lookupflag_markFilterSet_ self.lookups_.append(result) return result def add_lookup_to_feature_(self, lookup, feature_name): for script, lang in self.language_systems: key = (script, lang, feature_name) self.features_.setdefault(key, []).append(lookup) def get_lookup_(self, location, builder_class): if ( self.cur_lookup_ and type(self.cur_lookup_) == builder_class and self.cur_lookup_.lookupflag == self.lookupflag_ and self.cur_lookup_.markFilterSet == self.lookupflag_markFilterSet_ ): return self.cur_lookup_ if self.cur_lookup_name_ and self.cur_lookup_: raise FeatureLibError( "Within a named lookup block, all rules must be of " "the same lookup type and flag", location, ) self.cur_lookup_ = builder_class(self.font, location) self.cur_lookup_.lookupflag = self.lookupflag_ self.cur_lookup_.markFilterSet = self.lookupflag_markFilterSet_ self.lookups_.append(self.cur_lookup_) if self.cur_lookup_name_: # We are starting a lookup rule inside a named lookup block. self.named_lookups_[self.cur_lookup_name_] = self.cur_lookup_ if self.cur_feature_name_: # We are starting a lookup rule inside a feature. This includes # lookup rules inside named lookups inside features. 
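            # Note: the same lookup builder may thus be registered twice, once
            # under its name (above) and once on the enclosing feature (below).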
self.add_lookup_to_feature_(self.cur_lookup_, self.cur_feature_name_) return self.cur_lookup_ def build_feature_aalt_(self): if not self.aalt_features_ and not self.aalt_alternates_: return # > alternate glyphs will be sorted in the order that the source features # > are named in the aalt definition, not the order of the feature definitions # > in the file. Alternates defined explicitly ... will precede all others. # https://github.com/fonttools/fonttools/issues/836 alternates = {g: list(a) for g, a in self.aalt_alternates_.items()} for location, name in self.aalt_features_ + [(None, "aalt")]: feature = [ (script, lang, feature, lookups) for (script, lang, feature), lookups in self.features_.items() if feature == name ] # "aalt" does not have to specify its own lookups, but it might. if not feature and name != "aalt": warnings.warn("%s: Feature %s has not been defined" % (location, name)) continue for script, lang, feature, lookups in feature: for lookuplist in lookups: if not isinstance(lookuplist, list): lookuplist = [lookuplist] for lookup in lookuplist: for glyph, alts in lookup.getAlternateGlyphs().items(): alts_for_glyph = alternates.setdefault(glyph, []) alts_for_glyph.extend( g for g in alts if g not in alts_for_glyph ) single = { glyph: repl[0] for glyph, repl in alternates.items() if len(repl) == 1 } multi = {glyph: repl for glyph, repl in alternates.items() if len(repl) > 1} if not single and not multi: return self.features_ = { (script, lang, feature): lookups for (script, lang, feature), lookups in self.features_.items() if feature != "aalt" } old_lookups = self.lookups_ self.lookups_ = [] self.start_feature(self.aalt_location_, "aalt") if single: single_lookup = self.get_lookup_(location, SingleSubstBuilder) single_lookup.mapping = single if multi: multi_lookup = self.get_lookup_(location, AlternateSubstBuilder) multi_lookup.alternates = multi self.end_feature() self.lookups_.extend(old_lookups) def build_head(self): if not self.fontRevision_: return table = self.font.get("head") if not table: # this only happens for unit tests table = self.font["head"] = newTable("head") table.decompile(b"\0" * 54, self.font) table.tableVersion = 1.0 table.created = table.modified = 3406620153 # 2011-12-13 11:22:33 table.fontRevision = self.fontRevision_ def build_hhea(self): if not self.hhea_: return table = self.font.get("hhea") if not table: # this only happens for unit tests table = self.font["hhea"] = newTable("hhea") table.decompile(b"\0" * 36, self.font) table.tableVersion = 0x00010000 if "caretoffset" in self.hhea_: table.caretOffset = self.hhea_["caretoffset"] if "ascender" in self.hhea_: table.ascent = self.hhea_["ascender"] if "descender" in self.hhea_: table.descent = self.hhea_["descender"] if "linegap" in self.hhea_: table.lineGap = self.hhea_["linegap"] def build_vhea(self): if not self.vhea_: return table = self.font.get("vhea") if not table: # this only happens for unit tests table = self.font["vhea"] = newTable("vhea") table.decompile(b"\0" * 36, self.font) table.tableVersion = 0x00011000 if "verttypoascender" in self.vhea_: table.ascent = self.vhea_["verttypoascender"] if "verttypodescender" in self.vhea_: table.descent = self.vhea_["verttypodescender"] if "verttypolinegap" in self.vhea_: table.lineGap = self.vhea_["verttypolinegap"] def get_user_name_id(self, table): # Try to find first unused font-specific name id nameIDs = [name.nameID for name in table.names] for user_name_id in range(256, 32767): if user_name_id not in nameIDs: return user_name_id def 
buildFeatureParams(self, tag): params = None if tag == "size": params = otTables.FeatureParamsSize() ( params.DesignSize, params.SubfamilyID, params.RangeStart, params.RangeEnd, ) = self.size_parameters_ if tag in self.featureNames_ids_: params.SubfamilyNameID = self.featureNames_ids_[tag] else: params.SubfamilyNameID = 0 elif tag in self.featureNames_: if not self.featureNames_ids_: # name table wasn't selected among the tables to build; skip pass else: assert tag in self.featureNames_ids_ params = otTables.FeatureParamsStylisticSet() params.Version = 0 params.UINameID = self.featureNames_ids_[tag] elif tag in self.cv_parameters_: params = otTables.FeatureParamsCharacterVariants() params.Format = 0 params.FeatUILabelNameID = self.cv_parameters_ids_.get( (tag, "FeatUILabelNameID"), 0 ) params.FeatUITooltipTextNameID = self.cv_parameters_ids_.get( (tag, "FeatUITooltipTextNameID"), 0 ) params.SampleTextNameID = self.cv_parameters_ids_.get( (tag, "SampleTextNameID"), 0 ) params.NumNamedParameters = self.cv_num_named_params_.get(tag, 0) params.FirstParamUILabelNameID = self.cv_parameters_ids_.get( (tag, "ParamUILabelNameID_0"), 0 ) params.CharCount = len(self.cv_characters_[tag]) params.Character = self.cv_characters_[tag] return params def build_name(self): if not self.names_: return table = self.font.get("name") if not table: # this only happens for unit tests table = self.font["name"] = newTable("name") table.names = [] for name in self.names_: nameID, platformID, platEncID, langID, string = name # For featureNames block, nameID is 'feature tag' # For cvParameters blocks, nameID is ('feature tag', 'block name') if not isinstance(nameID, int): tag = nameID if tag in self.featureNames_: if tag not in self.featureNames_ids_: self.featureNames_ids_[tag] = self.get_user_name_id(table) assert self.featureNames_ids_[tag] is not None nameID = self.featureNames_ids_[tag] elif tag[0] in self.cv_parameters_: if tag not in self.cv_parameters_ids_: self.cv_parameters_ids_[tag] = self.get_user_name_id(table) assert self.cv_parameters_ids_[tag] is not None nameID = self.cv_parameters_ids_[tag] table.setName(string, nameID, platformID, platEncID, langID) table.names.sort() def build_OS_2(self): if not self.os2_: return table = self.font.get("OS/2") if not table: # this only happens for unit tests table = self.font["OS/2"] = newTable("OS/2") data = b"\0" * sstruct.calcsize(getTableModule("OS/2").OS2_format_0) table.decompile(data, self.font) version = 0 if "fstype" in self.os2_: table.fsType = self.os2_["fstype"] if "panose" in self.os2_: panose = getTableModule("OS/2").Panose() ( panose.bFamilyType, panose.bSerifStyle, panose.bWeight, panose.bProportion, panose.bContrast, panose.bStrokeVariation, panose.bArmStyle, panose.bLetterForm, panose.bMidline, panose.bXHeight, ) = self.os2_["panose"] table.panose = panose if "typoascender" in self.os2_: table.sTypoAscender = self.os2_["typoascender"] if "typodescender" in self.os2_: table.sTypoDescender = self.os2_["typodescender"] if "typolinegap" in self.os2_: table.sTypoLineGap = self.os2_["typolinegap"] if "winascent" in self.os2_: table.usWinAscent = self.os2_["winascent"] if "windescent" in self.os2_: table.usWinDescent = self.os2_["windescent"] if "vendor" in self.os2_: table.achVendID = safeEval("'''" + self.os2_["vendor"] + "'''") if "weightclass" in self.os2_: table.usWeightClass = self.os2_["weightclass"] if "widthclass" in self.os2_: table.usWidthClass = self.os2_["widthclass"] if "unicoderange" in self.os2_: 
table.setUnicodeRanges(self.os2_["unicoderange"]) if "codepagerange" in self.os2_: pages = self.build_codepages_(self.os2_["codepagerange"]) table.ulCodePageRange1, table.ulCodePageRange2 = pages version = 1 if "xheight" in self.os2_: table.sxHeight = self.os2_["xheight"] version = 2 if "capheight" in self.os2_: table.sCapHeight = self.os2_["capheight"] version = 2 if "loweropsize" in self.os2_: table.usLowerOpticalPointSize = self.os2_["loweropsize"] version = 5 if "upperopsize" in self.os2_: table.usUpperOpticalPointSize = self.os2_["upperopsize"] version = 5 def checkattr(table, attrs): for attr in attrs: if not hasattr(table, attr): setattr(table, attr, 0) table.version = max(version, table.version) # this only happens for unit tests if version >= 1: checkattr(table, ("ulCodePageRange1", "ulCodePageRange2")) if version >= 2: checkattr( table, ( "sxHeight", "sCapHeight", "usDefaultChar", "usBreakChar", "usMaxContext", ), ) if version >= 5: checkattr(table, ("usLowerOpticalPointSize", "usUpperOpticalPointSize")) def setElidedFallbackName(self, value, location): # ElidedFallbackName is a convenience method for setting # ElidedFallbackNameID so only one can be allowed for token in ("ElidedFallbackName", "ElidedFallbackNameID"): if token in self.stat_: raise FeatureLibError( f"{token} is already set.", location, ) if isinstance(value, int): self.stat_["ElidedFallbackNameID"] = value elif isinstance(value, list): self.stat_["ElidedFallbackName"] = value else: raise AssertionError(value) def addDesignAxis(self, designAxis, location): if "DesignAxes" not in self.stat_: self.stat_["DesignAxes"] = [] if designAxis.tag in (r.tag for r in self.stat_["DesignAxes"]): raise FeatureLibError( f'DesignAxis already defined for tag "{designAxis.tag}".', location, ) if designAxis.axisOrder in (r.axisOrder for r in self.stat_["DesignAxes"]): raise FeatureLibError( f"DesignAxis already defined for axis number {designAxis.axisOrder}.", location, ) self.stat_["DesignAxes"].append(designAxis) def addAxisValueRecord(self, axisValueRecord, location): if "AxisValueRecords" not in self.stat_: self.stat_["AxisValueRecords"] = [] # Check for duplicate AxisValueRecords for record_ in self.stat_["AxisValueRecords"]: if ( {n.asFea() for n in record_.names} == {n.asFea() for n in axisValueRecord.names} and {n.asFea() for n in record_.locations} == {n.asFea() for n in axisValueRecord.locations} and record_.flags == axisValueRecord.flags ): raise FeatureLibError( "An AxisValueRecord with these values is already defined.", location, ) self.stat_["AxisValueRecords"].append(axisValueRecord) def build_STAT(self): if not self.stat_: return axes = self.stat_.get("DesignAxes") if not axes: raise FeatureLibError("DesignAxes not defined", None) axisValueRecords = self.stat_.get("AxisValueRecords") axisValues = {} format4_locations = [] for tag in axes: axisValues[tag.tag] = [] if axisValueRecords is not None: for avr in axisValueRecords: valuesDict = {} if avr.flags > 0: valuesDict["flags"] = avr.flags if len(avr.locations) == 1: location = avr.locations[0] values = location.values if len(values) == 1: # format1 valuesDict.update({"value": values[0], "name": avr.names}) if len(values) == 2: # format3 valuesDict.update( { "value": values[0], "linkedValue": values[1], "name": avr.names, } ) if len(values) == 3: # format2 nominal, minVal, maxVal = values valuesDict.update( { "nominalValue": nominal, "rangeMinValue": minVal, "rangeMaxValue": maxVal, "name": avr.names, } ) axisValues[location.tag].append(valuesDict) else: 
valuesDict.update( { "location": {i.tag: i.values[0] for i in avr.locations}, "name": avr.names, } ) format4_locations.append(valuesDict) designAxes = [ { "ordering": a.axisOrder, "tag": a.tag, "name": a.names, "values": axisValues[a.tag], } for a in axes ] nameTable = self.font.get("name") if not nameTable: # this only happens for unit tests nameTable = self.font["name"] = newTable("name") nameTable.names = [] if "ElidedFallbackNameID" in self.stat_: nameID = self.stat_["ElidedFallbackNameID"] name = nameTable.getDebugName(nameID) if not name: raise FeatureLibError( f"ElidedFallbackNameID {nameID} points " "to a nameID that does not exist in the " '"name" table', None, ) elif "ElidedFallbackName" in self.stat_: nameID = self.stat_["ElidedFallbackName"] otl.buildStatTable( self.font, designAxes, locations=format4_locations, elidedFallbackName=nameID, ) def build_codepages_(self, pages): pages2bits = { 1252: 0, 1250: 1, 1251: 2, 1253: 3, 1254: 4, 1255: 5, 1256: 6, 1257: 7, 1258: 8, 874: 16, 932: 17, 936: 18, 949: 19, 950: 20, 1361: 21, 869: 48, 866: 49, 865: 50, 864: 51, 863: 52, 862: 53, 861: 54, 860: 55, 857: 56, 855: 57, 852: 58, 775: 59, 737: 60, 708: 61, 850: 62, 437: 63, } bits = [pages2bits[p] for p in pages if p in pages2bits] pages = [] for i in range(2): pages.append("") for j in range(i * 32, (i + 1) * 32): if j in bits: pages[i] += "1" else: pages[i] += "0" return [binary2num(p[::-1]) for p in pages] def buildBASE(self): if not self.base_horiz_axis_ and not self.base_vert_axis_: return None base = otTables.BASE() base.Version = 0x00010000 base.HorizAxis = self.buildBASEAxis(self.base_horiz_axis_) base.VertAxis = self.buildBASEAxis(self.base_vert_axis_) result = newTable("BASE") result.table = base return result def buildBASEAxis(self, axis): if not axis: return bases, scripts = axis axis = otTables.Axis() axis.BaseTagList = otTables.BaseTagList() axis.BaseTagList.BaselineTag = bases axis.BaseTagList.BaseTagCount = len(bases) axis.BaseScriptList = otTables.BaseScriptList() axis.BaseScriptList.BaseScriptRecord = [] axis.BaseScriptList.BaseScriptCount = len(scripts) for script in sorted(scripts): record = otTables.BaseScriptRecord() record.BaseScriptTag = script[0] record.BaseScript = otTables.BaseScript() record.BaseScript.BaseLangSysCount = 0 record.BaseScript.BaseValues = otTables.BaseValues() record.BaseScript.BaseValues.DefaultIndex = bases.index(script[1]) record.BaseScript.BaseValues.BaseCoord = [] record.BaseScript.BaseValues.BaseCoordCount = len(script[2]) for c in script[2]: coord = otTables.BaseCoord() coord.Format = 1 coord.Coordinate = c record.BaseScript.BaseValues.BaseCoord.append(coord) axis.BaseScriptList.BaseScriptRecord.append(record) return axis def buildGDEF(self): gdef = otTables.GDEF() gdef.GlyphClassDef = self.buildGDEFGlyphClassDef_() gdef.AttachList = otl.buildAttachList(self.attachPoints_, self.glyphMap) gdef.LigCaretList = otl.buildLigCaretList( self.ligCaretCoords_, self.ligCaretPoints_, self.glyphMap ) gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_() gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_() gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000 if self.varstorebuilder: store = self.varstorebuilder.finish() if store: gdef.Version = 0x00010003 gdef.VarStore = store varidx_map = store.optimize() gdef.remap_device_varidxes(varidx_map) if "GPOS" in self.font: self.font["GPOS"].table.remap_device_varidxes(varidx_map) self.model_cache.clear() if any( ( gdef.GlyphClassDef, gdef.AttachList, gdef.LigCaretList, 
gdef.MarkAttachClassDef, gdef.MarkGlyphSetsDef, ) ) or hasattr(gdef, "VarStore"): result = newTable("GDEF") result.table = gdef return result else: return None def buildGDEFGlyphClassDef_(self): if self.glyphClassDefs_: classes = {g: c for (g, (c, _)) in self.glyphClassDefs_.items()} else: classes = {} for lookup in self.lookups_: classes.update(lookup.inferGlyphClasses()) for markClass in self.parseTree.markClasses.values(): for markClassDef in markClass.definitions: for glyph in markClassDef.glyphSet(): classes[glyph] = 3 if classes: result = otTables.GlyphClassDef() result.classDefs = classes return result else: return None def buildGDEFMarkAttachClassDef_(self): classDefs = {g: c for g, (c, _) in self.markAttach_.items()} if not classDefs: return None result = otTables.MarkAttachClassDef() result.classDefs = classDefs return result def buildGDEFMarkGlyphSetsDef_(self): sets = [] for glyphs, id_ in sorted( self.markFilterSets_.items(), key=lambda item: item[1] ): sets.append(glyphs) return otl.buildMarkGlyphSetsDef(sets, self.glyphMap) def buildDebg(self): if "Debg" not in self.font: self.font["Debg"] = newTable("Debg") self.font["Debg"].data = {} self.font["Debg"].data[LOOKUP_DEBUG_INFO_KEY] = self.lookup_locations def buildLookups_(self, tag): assert tag in ("GPOS", "GSUB"), tag for lookup in self.lookups_: lookup.lookup_index = None lookups = [] for lookup in self.lookups_: if lookup.table != tag: continue lookup.lookup_index = len(lookups) self.lookup_locations[tag][str(lookup.lookup_index)] = LookupDebugInfo( location=str(lookup.location), name=self.get_lookup_name_(lookup), feature=None, ) lookups.append(lookup) otLookups = [] for l in lookups: try: otLookups.append(l.build()) except OpenTypeLibError as e: raise FeatureLibError(str(e), e.location) from e except Exception as e: location = self.lookup_locations[tag][str(l.lookup_index)].location raise FeatureLibError(str(e), location) from e return otLookups def makeTable(self, tag): table = getattr(otTables, tag, None)() table.Version = 0x00010000 table.ScriptList = otTables.ScriptList() table.ScriptList.ScriptRecord = [] table.FeatureList = otTables.FeatureList() table.FeatureList.FeatureRecord = [] table.LookupList = otTables.LookupList() table.LookupList.Lookup = self.buildLookups_(tag) # Build a table for mapping (tag, lookup_indices) to feature_index. # For example, ('liga', (2,3,7)) --> 23. feature_indices = {} required_feature_indices = {} # ('latn', 'DEU') --> 23 scripts = {} # 'latn' --> {'DEU': [23, 24]} for feature #23,24 # Sort the feature table by feature tag: # https://github.com/fonttools/fonttools/issues/568 sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1]) for key, lookups in sorted(self.features_.items(), key=sortFeatureTag): script, lang, feature_tag = key # l.lookup_index will be None when a lookup is not needed # for the table under construction. For example, substitution # rules will have no lookup_index while building GPOS tables. lookup_indices = tuple( [l.lookup_index for l in lookups if l.lookup_index is not None] ) size_feature = tag == "GPOS" and feature_tag == "size" force_feature = self.any_feature_variations(feature_tag, tag) if len(lookup_indices) == 0 and not size_feature and not force_feature: continue for ix in lookup_indices: try: self.lookup_locations[tag][str(ix)] = self.lookup_locations[tag][ str(ix) ]._replace(feature=key) except KeyError: warnings.warn( "feaLib.Builder subclass needs upgrading to " "stash debug information. See fonttools#2065." 
) feature_key = (feature_tag, lookup_indices) feature_index = feature_indices.get(feature_key) if feature_index is None: feature_index = len(table.FeatureList.FeatureRecord) frec = otTables.FeatureRecord() frec.FeatureTag = feature_tag frec.Feature = otTables.Feature() frec.Feature.FeatureParams = self.buildFeatureParams(feature_tag) frec.Feature.LookupListIndex = list(lookup_indices) frec.Feature.LookupCount = len(lookup_indices) table.FeatureList.FeatureRecord.append(frec) feature_indices[feature_key] = feature_index scripts.setdefault(script, {}).setdefault(lang, []).append(feature_index) if self.required_features_.get((script, lang)) == feature_tag: required_feature_indices[(script, lang)] = feature_index # Build ScriptList. for script, lang_features in sorted(scripts.items()): srec = otTables.ScriptRecord() srec.ScriptTag = script srec.Script = otTables.Script() srec.Script.DefaultLangSys = None srec.Script.LangSysRecord = [] for lang, feature_indices in sorted(lang_features.items()): langrec = otTables.LangSysRecord() langrec.LangSys = otTables.LangSys() langrec.LangSys.LookupOrder = None req_feature_index = required_feature_indices.get((script, lang)) if req_feature_index is None: langrec.LangSys.ReqFeatureIndex = 0xFFFF else: langrec.LangSys.ReqFeatureIndex = req_feature_index langrec.LangSys.FeatureIndex = [ i for i in feature_indices if i != req_feature_index ] langrec.LangSys.FeatureCount = len(langrec.LangSys.FeatureIndex) if lang == "dflt": srec.Script.DefaultLangSys = langrec.LangSys else: langrec.LangSysTag = lang srec.Script.LangSysRecord.append(langrec) srec.Script.LangSysCount = len(srec.Script.LangSysRecord) table.ScriptList.ScriptRecord.append(srec) table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord) table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord) table.LookupList.LookupCount = len(table.LookupList.Lookup) return table def makeFeatureVariations(self, table, table_tag): feature_vars = {} has_any_variations = False # Sort out which lookups to build, gather their indices for (_, _, feature_tag), variations in self.feature_variations_.items(): feature_vars[feature_tag] = [] for conditionset, builders in variations.items(): raw_conditionset = self.conditionsets_[conditionset] indices = [] for b in builders: if b.table != table_tag: continue assert b.lookup_index is not None indices.append(b.lookup_index) has_any_variations = True feature_vars[feature_tag].append((raw_conditionset, indices)) if has_any_variations: for feature_tag, conditions_and_lookups in feature_vars.items(): addFeatureVariationsRaw( self.font, table, conditions_and_lookups, feature_tag ) def any_feature_variations(self, feature_tag, table_tag): for (_, _, feature), variations in self.feature_variations_.items(): if feature != feature_tag: continue for conditionset, builders in variations.items(): if any(b.table == table_tag for b in builders): return True return False def get_lookup_name_(self, lookup): rev = {v: k for k, v in self.named_lookups_.items()} if lookup in rev: return rev[lookup] return None def add_language_system(self, location, script, language): # OpenType Feature File Specification, section 4.b.i if script == "DFLT" and language == "dflt" and self.default_language_systems_: raise FeatureLibError( 'If "languagesystem DFLT dflt" is present, it must be ' "the first of the languagesystem statements", location, ) if script == "DFLT": if self.seen_non_DFLT_script_: raise FeatureLibError( 'languagesystems using the "DFLT" script tag must ' "precede all other 
languagesystems", location, ) else: self.seen_non_DFLT_script_ = True if (script, language) in self.default_language_systems_: raise FeatureLibError( '"languagesystem %s %s" has already been specified' % (script.strip(), language.strip()), location, ) self.default_language_systems_.add((script, language)) def get_default_language_systems_(self): # OpenType Feature File specification, 4.b.i. languagesystem: # If no "languagesystem" statement is present, then the # implementation must behave exactly as though the following # statement were present at the beginning of the feature file: # languagesystem DFLT dflt; if self.default_language_systems_: return frozenset(self.default_language_systems_) else: return frozenset({("DFLT", "dflt")}) def start_feature(self, location, name): self.language_systems = self.get_default_language_systems_() self.script_ = "DFLT" self.cur_lookup_ = None self.cur_feature_name_ = name self.lookupflag_ = 0 self.lookupflag_markFilterSet_ = None if name == "aalt": self.aalt_location_ = location def end_feature(self): assert self.cur_feature_name_ is not None self.cur_feature_name_ = None self.language_systems = None self.cur_lookup_ = None self.lookupflag_ = 0 self.lookupflag_markFilterSet_ = None def start_lookup_block(self, location, name): if name in self.named_lookups_: raise FeatureLibError( 'Lookup "%s" has already been defined' % name, location ) if self.cur_feature_name_ == "aalt": raise FeatureLibError( "Lookup blocks cannot be placed inside 'aalt' features; " "move it out, and then refer to it with a lookup statement", location, ) self.cur_lookup_name_ = name self.named_lookups_[name] = None self.cur_lookup_ = None if self.cur_feature_name_ is None: self.lookupflag_ = 0 self.lookupflag_markFilterSet_ = None def end_lookup_block(self): assert self.cur_lookup_name_ is not None self.cur_lookup_name_ = None self.cur_lookup_ = None if self.cur_feature_name_ is None: self.lookupflag_ = 0 self.lookupflag_markFilterSet_ = None def add_lookup_call(self, lookup_name): assert lookup_name in self.named_lookups_, lookup_name self.cur_lookup_ = None lookup = self.named_lookups_[lookup_name] if lookup is not None: # skip empty named lookup self.add_lookup_to_feature_(lookup, self.cur_feature_name_) def set_font_revision(self, location, revision): self.fontRevision_ = revision def set_language(self, location, language, include_default, required): assert len(language) == 4 if self.cur_feature_name_ in ("aalt", "size"): raise FeatureLibError( "Language statements are not allowed " 'within "feature %s"' % self.cur_feature_name_, location, ) if self.cur_feature_name_ is None: raise FeatureLibError( "Language statements are not allowed " "within standalone lookup blocks", location, ) self.cur_lookup_ = None key = (self.script_, language, self.cur_feature_name_) lookups = self.features_.get((key[0], "dflt", key[2])) if (language == "dflt" or include_default) and lookups: self.features_[key] = lookups[:] else: self.features_[key] = [] self.language_systems = frozenset([(self.script_, language)]) if required: key = (self.script_, language) if key in self.required_features_: raise FeatureLibError( "Language %s (script %s) has already " "specified feature %s as its required feature" % ( language.strip(), self.script_.strip(), self.required_features_[key].strip(), ), location, ) self.required_features_[key] = self.cur_feature_name_ def getMarkAttachClass_(self, location, glyphs): glyphs = frozenset(glyphs) id_ = self.markAttachClassID_.get(glyphs) if id_ is not None: return id_ id_ = 
len(self.markAttachClassID_) + 1 self.markAttachClassID_[glyphs] = id_ for glyph in glyphs: if glyph in self.markAttach_: _, loc = self.markAttach_[glyph] raise FeatureLibError( "Glyph %s already has been assigned " "a MarkAttachmentType at %s" % (glyph, loc), location, ) self.markAttach_[glyph] = (id_, location) return id_ def getMarkFilterSet_(self, location, glyphs): glyphs = frozenset(glyphs) id_ = self.markFilterSets_.get(glyphs) if id_ is not None: return id_ id_ = len(self.markFilterSets_) self.markFilterSets_[glyphs] = id_ return id_ def set_lookup_flag(self, location, value, markAttach, markFilter): value = value & 0xFF if markAttach: markAttachClass = self.getMarkAttachClass_(location, markAttach) value = value | (markAttachClass << 8) if markFilter: markFilterSet = self.getMarkFilterSet_(location, markFilter) value = value | 0x10 self.lookupflag_markFilterSet_ = markFilterSet else: self.lookupflag_markFilterSet_ = None self.lookupflag_ = value def set_script(self, location, script): if self.cur_feature_name_ in ("aalt", "size"): raise FeatureLibError( "Script statements are not allowed " 'within "feature %s"' % self.cur_feature_name_, location, ) if self.cur_feature_name_ is None: raise FeatureLibError( "Script statements are not allowed " "within standalone lookup blocks", location, ) if self.language_systems == {(script, "dflt")}: # Nothing to do. return self.cur_lookup_ = None self.script_ = script self.lookupflag_ = 0 self.lookupflag_markFilterSet_ = None self.set_language(location, "dflt", include_default=True, required=False) def find_lookup_builders_(self, lookups): """Helper for building chain contextual substitutions Given a list of lookup names, finds the LookupBuilder for each name. If an input name is None, it gets mapped to a None LookupBuilder. 
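For example (a hypothetical rule): for "sub a' lookup LIGA b' ;" the incoming
list is [[<the "LIGA" lookup block>], None] and the result is
[[<LookupBuilder for "LIGA">], None].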
""" lookup_builders = [] for lookuplist in lookups: if lookuplist is not None: lookup_builders.append( [self.named_lookups_.get(l.name) for l in lookuplist] ) else: lookup_builders.append(None) return lookup_builders def add_attach_points(self, location, glyphs, contourPoints): for glyph in glyphs: self.attachPoints_.setdefault(glyph, set()).update(contourPoints) def add_feature_reference(self, location, featureName): if self.cur_feature_name_ != "aalt": raise FeatureLibError( 'Feature references are only allowed inside "feature aalt"', location ) self.aalt_features_.append((location, featureName)) def add_featureName(self, tag): self.featureNames_.add(tag) def add_cv_parameter(self, tag): self.cv_parameters_.add(tag) def add_to_cv_num_named_params(self, tag): """Adds new items to ``self.cv_num_named_params_`` or increments the count of existing items.""" if tag in self.cv_num_named_params_: self.cv_num_named_params_[tag] += 1 else: self.cv_num_named_params_[tag] = 1 def add_cv_character(self, character, tag): self.cv_characters_[tag].append(character) def set_base_axis(self, bases, scripts, vertical): if vertical: self.base_vert_axis_ = (bases, scripts) else: self.base_horiz_axis_ = (bases, scripts) def set_size_parameters( self, location, DesignSize, SubfamilyID, RangeStart, RangeEnd ): if self.cur_feature_name_ != "size": raise FeatureLibError( "Parameters statements are not allowed " 'within "feature %s"' % self.cur_feature_name_, location, ) self.size_parameters_ = [DesignSize, SubfamilyID, RangeStart, RangeEnd] for script, lang in self.language_systems: key = (script, lang, self.cur_feature_name_) self.features_.setdefault(key, []) # GSUB rules # GSUB 1 def add_single_subst(self, location, prefix, suffix, mapping, forceChain): if self.cur_feature_name_ == "aalt": for from_glyph, to_glyph in mapping.items(): alts = self.aalt_alternates_.setdefault(from_glyph, []) if to_glyph not in alts: alts.append(to_glyph) return if prefix or suffix or forceChain: self.add_single_subst_chained_(location, prefix, suffix, mapping) return lookup = self.get_lookup_(location, SingleSubstBuilder) for from_glyph, to_glyph in mapping.items(): if from_glyph in lookup.mapping: if to_glyph == lookup.mapping[from_glyph]: log.info( "Removing duplicate single substitution from glyph" ' "%s" to "%s" at %s', from_glyph, to_glyph, location, ) else: raise FeatureLibError( 'Already defined rule for replacing glyph "%s" by "%s"' % (from_glyph, lookup.mapping[from_glyph]), location, ) lookup.mapping[from_glyph] = to_glyph # GSUB 2 def add_multiple_subst( self, location, prefix, glyph, suffix, replacements, forceChain=False ): if prefix or suffix or forceChain: chain = self.get_lookup_(location, ChainContextSubstBuilder) sub = self.get_chained_lookup_(location, MultipleSubstBuilder) sub.mapping[glyph] = replacements chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [sub])) return lookup = self.get_lookup_(location, MultipleSubstBuilder) if glyph in lookup.mapping: if replacements == lookup.mapping[glyph]: log.info( "Removing duplicate multiple substitution from glyph" ' "%s" to %s%s', glyph, replacements, f" at {location}" if location else "", ) else: raise FeatureLibError( 'Already defined substitution for glyph "%s"' % glyph, location ) lookup.mapping[glyph] = replacements # GSUB 3 def add_alternate_subst(self, location, prefix, glyph, suffix, replacement): if self.cur_feature_name_ == "aalt": alts = self.aalt_alternates_.setdefault(glyph, []) alts.extend(g for g in replacement if g not in alts) return 
if prefix or suffix: chain = self.get_lookup_(location, ChainContextSubstBuilder) lookup = self.get_chained_lookup_(location, AlternateSubstBuilder) chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [lookup])) else: lookup = self.get_lookup_(location, AlternateSubstBuilder) if glyph in lookup.alternates: raise FeatureLibError( 'Already defined alternates for glyph "%s"' % glyph, location ) # We allow empty replacement glyphs here. lookup.alternates[glyph] = replacement # GSUB 4 def add_ligature_subst( self, location, prefix, glyphs, suffix, replacement, forceChain ): if prefix or suffix or forceChain: chain = self.get_lookup_(location, ChainContextSubstBuilder) lookup = self.get_chained_lookup_(location, LigatureSubstBuilder) chain.rules.append(ChainContextualRule(prefix, glyphs, suffix, [lookup])) else: lookup = self.get_lookup_(location, LigatureSubstBuilder) if not all(glyphs): raise FeatureLibError("Empty glyph class in substitution", location) # OpenType feature file syntax, section 5.d, "Ligature substitution": # "Since the OpenType specification does not allow ligature # substitutions to be specified on target sequences that contain # glyph classes, the implementation software will enumerate # all specific glyph sequences if glyph classes are detected" for g in itertools.product(*glyphs): lookup.ligatures[g] = replacement # GSUB 5/6 def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups): if not all(glyphs) or not all(prefix) or not all(suffix): raise FeatureLibError( "Empty glyph class in contextual substitution", location ) lookup = self.get_lookup_(location, ChainContextSubstBuilder) lookup.rules.append( ChainContextualRule( prefix, glyphs, suffix, self.find_lookup_builders_(lookups) ) ) def add_single_subst_chained_(self, location, prefix, suffix, mapping): if not mapping or not all(prefix) or not all(suffix): raise FeatureLibError( "Empty glyph class in contextual substitution", location ) # https://github.com/fonttools/fonttools/issues/512 # https://github.com/fonttools/fonttools/issues/2150 chain = self.get_lookup_(location, ChainContextSubstBuilder) sub = chain.find_chainable_single_subst(mapping) if sub is None: sub = self.get_chained_lookup_(location, SingleSubstBuilder) sub.mapping.update(mapping) chain.rules.append( ChainContextualRule(prefix, [list(mapping.keys())], suffix, [sub]) ) # GSUB 8 def add_reverse_chain_single_subst(self, location, old_prefix, old_suffix, mapping): if not mapping: raise FeatureLibError("Empty glyph class in substitution", location) lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder) lookup.rules.append((old_prefix, old_suffix, mapping)) # GPOS rules # GPOS 1 def add_single_pos(self, location, prefix, suffix, pos, forceChain): if prefix or suffix or forceChain: self.add_single_pos_chained_(location, prefix, suffix, pos) else: lookup = self.get_lookup_(location, SinglePosBuilder) for glyphs, value in pos: if not glyphs: raise FeatureLibError( "Empty glyph class in positioning rule", location ) otValueRecord = self.makeOpenTypeValueRecord( location, value, pairPosContext=False ) for glyph in glyphs: try: lookup.add_pos(location, glyph, otValueRecord) except OpenTypeLibError as e: raise FeatureLibError(str(e), e.location) from e # GPOS 2 def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2): if not glyphclass1 or not glyphclass2: raise FeatureLibError("Empty glyph class in positioning rule", location) lookup = self.get_lookup_(location, PairPosBuilder) v1 = 
self.makeOpenTypeValueRecord(location, value1, pairPosContext=True) v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True) lookup.addClassPair(location, glyphclass1, v1, glyphclass2, v2) def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2): if not glyph1 or not glyph2: raise FeatureLibError("Empty glyph class in positioning rule", location) lookup = self.get_lookup_(location, PairPosBuilder) v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True) v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True) lookup.addGlyphPair(location, glyph1, v1, glyph2, v2) # GPOS 3 def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor): if not glyphclass: raise FeatureLibError("Empty glyph class in positioning rule", location) lookup = self.get_lookup_(location, CursivePosBuilder) lookup.add_attachment( location, glyphclass, self.makeOpenTypeAnchor(location, entryAnchor), self.makeOpenTypeAnchor(location, exitAnchor), ) # GPOS 4 def add_mark_base_pos(self, location, bases, marks): builder = self.get_lookup_(location, MarkBasePosBuilder) self.add_marks_(location, builder, marks) if not bases: raise FeatureLibError("Empty glyph class in positioning rule", location) for baseAnchor, markClass in marks: otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor) for base in bases: builder.bases.setdefault(base, {})[markClass.name] = otBaseAnchor # GPOS 5 def add_mark_lig_pos(self, location, ligatures, components): builder = self.get_lookup_(location, MarkLigPosBuilder) componentAnchors = [] if not ligatures: raise FeatureLibError("Empty glyph class in positioning rule", location) for marks in components: anchors = {} self.add_marks_(location, builder, marks) for ligAnchor, markClass in marks: anchors[markClass.name] = self.makeOpenTypeAnchor(location, ligAnchor) componentAnchors.append(anchors) for glyph in ligatures: builder.ligatures[glyph] = componentAnchors # GPOS 6 def add_mark_mark_pos(self, location, baseMarks, marks): builder = self.get_lookup_(location, MarkMarkPosBuilder) self.add_marks_(location, builder, marks) if not baseMarks: raise FeatureLibError("Empty glyph class in positioning rule", location) for baseAnchor, markClass in marks: otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor) for baseMark in baseMarks: builder.baseMarks.setdefault(baseMark, {})[ markClass.name ] = otBaseAnchor # GPOS 7/8 def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups): if not all(glyphs) or not all(prefix) or not all(suffix): raise FeatureLibError( "Empty glyph class in contextual positioning rule", location ) lookup = self.get_lookup_(location, ChainContextPosBuilder) lookup.rules.append( ChainContextualRule( prefix, glyphs, suffix, self.find_lookup_builders_(lookups) ) ) def add_single_pos_chained_(self, location, prefix, suffix, pos): if not pos or not all(prefix) or not all(suffix): raise FeatureLibError( "Empty glyph class in contextual positioning rule", location ) # https://github.com/fonttools/fonttools/issues/514 chain = self.get_lookup_(location, ChainContextPosBuilder) targets = [] for _, _, _, lookups in chain.rules: targets.extend(lookups) subs = [] for glyphs, value in pos: if value is None: subs.append(None) continue otValue = self.makeOpenTypeValueRecord( location, value, pairPosContext=False ) sub = chain.find_chainable_single_pos(targets, glyphs, otValue) if sub is None: sub = self.get_chained_lookup_(location, SinglePosBuilder) targets.append(sub) for glyph in glyphs: 
sub.add_pos(location, glyph, otValue) subs.append(sub) assert len(pos) == len(subs), (pos, subs) chain.rules.append( ChainContextualRule(prefix, [g for g, v in pos], suffix, subs) ) def add_marks_(self, location, lookupBuilder, marks): """Helper for add_mark_{base,liga,mark}_pos.""" for _, markClass in marks: for markClassDef in markClass.definitions: for mark in markClassDef.glyphs.glyphSet(): if mark not in lookupBuilder.marks: otMarkAnchor = self.makeOpenTypeAnchor( location, copy.deepcopy(markClassDef.anchor) ) lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor) else: existingMarkClass = lookupBuilder.marks[mark][0] if markClass.name != existingMarkClass: raise FeatureLibError( "Glyph %s cannot be in both @%s and @%s" % (mark, existingMarkClass, markClass.name), location, ) def add_subtable_break(self, location): self.cur_lookup_.add_subtable_break(location) def setGlyphClass_(self, location, glyph, glyphClass): oldClass, oldLocation = self.glyphClassDefs_.get(glyph, (None, None)) if oldClass and oldClass != glyphClass: raise FeatureLibError( "Glyph %s was assigned to a different class at %s" % (glyph, oldLocation), location, ) self.glyphClassDefs_[glyph] = (glyphClass, location) def add_glyphClassDef( self, location, baseGlyphs, ligatureGlyphs, markGlyphs, componentGlyphs ): for glyph in baseGlyphs: self.setGlyphClass_(location, glyph, 1) for glyph in ligatureGlyphs: self.setGlyphClass_(location, glyph, 2) for glyph in markGlyphs: self.setGlyphClass_(location, glyph, 3) for glyph in componentGlyphs: self.setGlyphClass_(location, glyph, 4) def add_ligatureCaretByIndex_(self, location, glyphs, carets): for glyph in glyphs: if glyph not in self.ligCaretPoints_: self.ligCaretPoints_[glyph] = carets def makeLigCaret(self, location, caret): if not isinstance(caret, VariableScalar): return caret default, device = self.makeVariablePos(location, caret) if device is not None: return (default, device) return default def add_ligatureCaretByPos_(self, location, glyphs, carets): carets = [self.makeLigCaret(location, caret) for caret in carets] for glyph in glyphs: if glyph not in self.ligCaretCoords_: self.ligCaretCoords_[glyph] = carets def add_name_record(self, location, nameID, platformID, platEncID, langID, string): self.names_.append([nameID, platformID, platEncID, langID, string]) def add_os2_field(self, key, value): self.os2_[key] = value def add_hhea_field(self, key, value): self.hhea_[key] = value def add_vhea_field(self, key, value): self.vhea_[key] = value def add_conditionset(self, location, key, value): if "fvar" not in self.font: raise FeatureLibError( "Cannot add feature variations to a font without an 'fvar' table", location, ) # Normalize axisMap = { axis.axisTag: (axis.minValue, axis.defaultValue, axis.maxValue) for axis in self.axes } value = { tag: ( normalizeValue(bottom, axisMap[tag]), normalizeValue(top, axisMap[tag]), ) for tag, (bottom, top) in value.items() } # NOTE: This might result in rounding errors (off-by-ones) compared to # rules in Designspace files, since we're working with what's in the # `avar` table rather than the original values. 
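# A rough sketch of the two-step mapping this method performs, with
# made-up numbers (not taken from any real font): a user-space condition
# value of 700 on an axis whose (min, default, max) is (100, 400, 900) is
# first normalized, then pushed through that axis's `avar` segment map,
# if present (values approximate):
#
#     >>> from fontTools.varLib.models import normalizeValue, piecewiseLinearMap
#     >>> normalizeValue(700, (100, 400, 900))
#     0.6
#     >>> piecewiseLinearMap(0.6, {-1.0: -1.0, 0.0: 0.0, 0.5: 0.8, 1.0: 1.0})
#     0.84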
if "avar" in self.font: mapping = self.font["avar"].segments value = { axis: tuple( piecewiseLinearMap(v, mapping[axis]) if axis in mapping else v for v in condition_range ) for axis, condition_range in value.items() } self.conditionsets_[key] = value def makeVariablePos(self, location, varscalar): if not self.varstorebuilder: raise FeatureLibError( "Can't define a variable scalar in a non-variable font", location ) varscalar.axes = self.axes if not varscalar.does_vary: return varscalar.default, None default, index = varscalar.add_to_variation_store( self.varstorebuilder, self.model_cache, self.font.get("avar") ) device = None if index is not None and index != 0xFFFFFFFF: device = buildVarDevTable(index) return default, device def makeOpenTypeAnchor(self, location, anchor): """ast.Anchor --> otTables.Anchor""" if anchor is None: return None variable = False deviceX, deviceY = None, None if anchor.xDeviceTable is not None: deviceX = otl.buildDevice(dict(anchor.xDeviceTable)) if anchor.yDeviceTable is not None: deviceY = otl.buildDevice(dict(anchor.yDeviceTable)) for dim in ("x", "y"): varscalar = getattr(anchor, dim) if not isinstance(varscalar, VariableScalar): continue if getattr(anchor, dim + "DeviceTable") is not None: raise FeatureLibError( "Can't define a device coordinate and variable scalar", location ) default, device = self.makeVariablePos(location, varscalar) setattr(anchor, dim, default) if device is not None: if dim == "x": deviceX = device else: deviceY = device variable = True otlanchor = otl.buildAnchor( anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY ) if variable: otlanchor.Format = 3 return otlanchor _VALUEREC_ATTRS = { name[0].lower() + name[1:]: (name, isDevice) for _, name, isDevice, _ in otBase.valueRecordFormat if not name.startswith("Reserved") } def makeOpenTypeValueRecord(self, location, v, pairPosContext): """ast.ValueRecord --> otBase.ValueRecord""" if not v: return None vr = {} for astName, (otName, isDevice) in self._VALUEREC_ATTRS.items(): val = getattr(v, astName, None) if not val: continue if isDevice: vr[otName] = otl.buildDevice(dict(val)) elif isinstance(val, VariableScalar): otDeviceName = otName[0:4] + "Device" feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:] if getattr(v, feaDeviceName): raise FeatureLibError( "Can't define a device coordinate and variable scalar", location ) vr[otName], device = self.makeVariablePos(location, val) if device is not None: vr[otDeviceName] = device else: vr[otName] = val if pairPosContext and not vr: vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0} valRec = otl.buildValue(vr) return valRec PKaZZZh[I��fontTools/feaLib/error.pyclass FeatureLibError(Exception): def __init__(self, message, location): Exception.__init__(self, message) self.location = location def __str__(self): message = Exception.__str__(self) if self.location: return f"{self.location}: {message}" else: return message class IncludedFeaNotFound(FeatureLibError): def __str__(self): assert self.location is not None message = ( "The following feature file should be included but cannot be found: " f"{Exception.__str__(self)}" ) return f"{self.location}: {message}" PKaZZZ��ދm+m+fontTools/feaLib/lexer.pyfrom fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound from fontTools.feaLib.location import FeatureLibLocation import re import os try: import cython except ImportError: # if cython not installed, use mock module with no-op decorators and types from fontTools.misc import cython class Lexer(object): NUMBER = "NUMBER" 
HEXADECIMAL = "HEXADECIMAL" OCTAL = "OCTAL" NUMBERS = (NUMBER, HEXADECIMAL, OCTAL) FLOAT = "FLOAT" STRING = "STRING" NAME = "NAME" FILENAME = "FILENAME" GLYPHCLASS = "GLYPHCLASS" CID = "CID" SYMBOL = "SYMBOL" COMMENT = "COMMENT" NEWLINE = "NEWLINE" ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" CHAR_WHITESPACE_ = " \t" CHAR_NEWLINE_ = "\r\n" CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" CHAR_DIGIT_ = "0123456789" CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") MODE_NORMAL_ = "NORMAL" MODE_FILENAME_ = "FILENAME" def __init__(self, text, filename): self.filename_ = filename self.line_ = 1 self.pos_ = 0 self.line_start_ = 0 self.text_ = text self.text_length_ = len(text) self.mode_ = Lexer.MODE_NORMAL_ def __iter__(self): return self def next(self): # Python 2 return self.__next__() def __next__(self): # Python 3 while True: token_type, token, location = self.next_() if token_type != Lexer.NEWLINE: return (token_type, token, location) def location_(self): column = self.pos_ - self.line_start_ + 1 return FeatureLibLocation(self.filename_ or "<features>", self.line_, column) def next_(self): self.scan_over_(Lexer.CHAR_WHITESPACE_) location = self.location_() start = self.pos_ text = self.text_ limit = len(text) if start >= limit: raise StopIteration() cur_char = text[start] next_char = text[start + 1] if start + 1 < limit else None if cur_char == "\n": self.pos_ += 1 self.line_ += 1 self.line_start_ = self.pos_ return (Lexer.NEWLINE, None, location) if cur_char == "\r": self.pos_ += 2 if next_char == "\n" else 1 self.line_ += 1 self.line_start_ = self.pos_ return (Lexer.NEWLINE, None, location) if cur_char == "#": self.scan_until_(Lexer.CHAR_NEWLINE_) return (Lexer.COMMENT, text[start : self.pos_], location) if self.mode_ is Lexer.MODE_FILENAME_: if cur_char != "(": raise FeatureLibError("Expected '(' before file name", location) self.scan_until_(")") cur_char = text[self.pos_] if self.pos_ < limit else None if cur_char != ")": raise FeatureLibError("Expected ')' after file name", location) self.pos_ += 1 self.mode_ = Lexer.MODE_NORMAL_ return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: self.pos_ += 1 self.scan_over_(Lexer.CHAR_DIGIT_) return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) if cur_char == "@": self.pos_ += 1 self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) glyphclass = text[start + 1 : self.pos_] if len(glyphclass) < 1: raise FeatureLibError("Expected glyph class name", location) if not Lexer.RE_GLYPHCLASS.match(glyphclass): raise FeatureLibError( "Glyph class names must consist of letters, digits, " "underscore, period or hyphen", location, ) return (Lexer.GLYPHCLASS, glyphclass, location) if cur_char in Lexer.CHAR_NAME_START_: self.pos_ += 1 self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) token = text[start : self.pos_] if token == "include": self.mode_ = Lexer.MODE_FILENAME_ return (Lexer.NAME, token, location) if cur_char == "0" and next_char in "xX": self.pos_ += 2 self.scan_over_(Lexer.CHAR_HEXDIGIT_) return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: self.scan_over_(Lexer.CHAR_DIGIT_) return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) if cur_char in Lexer.CHAR_DIGIT_: self.scan_over_(Lexer.CHAR_DIGIT_) if 
self.pos_ >= limit or text[self.pos_] != ".": return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) self.scan_over_(".") self.scan_over_(Lexer.CHAR_DIGIT_) return (Lexer.FLOAT, float(text[start : self.pos_]), location) if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: self.pos_ += 1 self.scan_over_(Lexer.CHAR_DIGIT_) if self.pos_ >= limit or text[self.pos_] != ".": return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) self.scan_over_(".") self.scan_over_(Lexer.CHAR_DIGIT_) return (Lexer.FLOAT, float(text[start : self.pos_]), location) if cur_char in Lexer.CHAR_SYMBOL_: self.pos_ += 1 return (Lexer.SYMBOL, cur_char, location) if cur_char == '"': self.pos_ += 1 self.scan_until_('"') if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': self.pos_ += 1 # strip newlines embedded within a string string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) return (Lexer.STRING, string, location) else: raise FeatureLibError("Expected '\"' to terminate string", location) raise FeatureLibError("Unexpected character: %r" % cur_char, location) def scan_over_(self, valid): p = self.pos_ while p < self.text_length_ and self.text_[p] in valid: p += 1 self.pos_ = p def scan_until_(self, stop_at): p = self.pos_ while p < self.text_length_ and self.text_[p] not in stop_at: p += 1 self.pos_ = p def scan_anonymous_block(self, tag): location = self.location_() tag = tag.strip() self.scan_until_(Lexer.CHAR_NEWLINE_) self.scan_over_(Lexer.CHAR_NEWLINE_) regexp = r"}\s*" + tag + r"\s*;" split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) if len(split) != 2: raise FeatureLibError( "Expected '} %s;' to terminate anonymous block" % tag, location ) self.pos_ += len(split[0]) return (Lexer.ANONYMOUS_BLOCK, split[0], location) class IncludingLexer(object): """A Lexer that follows include statements. The OpenType feature file specification states that due to historical reasons, relative imports should be resolved in this order: 1. If the source font is UFO format, then relative to the UFO's font directory 2. relative to the top-level include file 3. relative to the parent include file We only support 1 (via includeDir) and 2. """ def __init__(self, featurefile, *, includeDir=None): """Initializes an IncludingLexer. Behavior: If includeDir is passed, it will be used to determine the top-level include directory to use for all encountered include statements. If it is not passed, ``os.path.dirname(featurefile)`` will be considered the include directory. 
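A minimal usage sketch (the file and directory names are illustrative):

    >>> from fontTools.feaLib.lexer import IncludingLexer
    >>> lexer = IncludingLexer("myfont.fea", includeDir="fea")  # doctest: +SKIP
    >>> tokens = list(lexer)  # doctest: +SKIP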
""" self.lexers_ = [self.make_lexer_(featurefile)] self.featurefilepath = self.lexers_[0].filename_ self.includeDir = includeDir def __iter__(self): return self def next(self): # Python 2 return self.__next__() def __next__(self): # Python 3 while self.lexers_: lexer = self.lexers_[-1] try: token_type, token, location = next(lexer) except StopIteration: self.lexers_.pop() continue if token_type is Lexer.NAME and token == "include": fname_type, fname_token, fname_location = lexer.next() if fname_type is not Lexer.FILENAME: raise FeatureLibError("Expected file name", fname_location) # semi_type, semi_token, semi_location = lexer.next() # if semi_type is not Lexer.SYMBOL or semi_token != ";": # raise FeatureLibError("Expected ';'", semi_location) if os.path.isabs(fname_token): path = fname_token else: if self.includeDir is not None: curpath = self.includeDir elif self.featurefilepath is not None: curpath = os.path.dirname(self.featurefilepath) else: # if the IncludingLexer was initialized from an in-memory # file-like stream, it doesn't have a 'name' pointing to # its filesystem path, therefore we fall back to using the # current working directory to resolve relative includes curpath = os.getcwd() path = os.path.join(curpath, fname_token) if len(self.lexers_) >= 5: raise FeatureLibError("Too many recursive includes", fname_location) try: self.lexers_.append(self.make_lexer_(path)) except FileNotFoundError as err: raise IncludedFeaNotFound(fname_token, fname_location) from err else: return (token_type, token, location) raise StopIteration() @staticmethod def make_lexer_(file_or_path): if hasattr(file_or_path, "read"): fileobj, closing = file_or_path, False else: filename, closing = file_or_path, True fileobj = open(filename, "r", encoding="utf-8") data = fileobj.read() filename = getattr(fileobj, "name", None) if closing: fileobj.close() return Lexer(data, filename) def scan_anonymous_block(self, tag): return self.lexers_[-1].scan_anonymous_block(tag) class NonIncludingLexer(IncludingLexer): """Lexer that does not follow `include` statements, emits them as-is.""" def __next__(self): # Python 3 return next(self.lexers_[0]) PKaZZZ8�3:��fontTools/feaLib/location.pyfrom typing import NamedTuple class FeatureLibLocation(NamedTuple): """A location in a feature file""" file: str line: int column: int def __str__(self): return f"{self.file}:{self.line}:{self.column}" PKaZZZ����00#fontTools/feaLib/lookupDebugInfo.pyfrom typing import NamedTuple LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib" LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING" class LookupDebugInfo(NamedTuple): """Information about where a lookup came from, to be embedded in a font""" location: str name: str feature: list PKaZZZd+ݢ���fontTools/feaLib/parser.pyfrom fontTools.feaLib.error import FeatureLibError from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer from fontTools.feaLib.variableScalar import VariableScalar from fontTools.misc.encodingTools import getEncoding from fontTools.misc.textTools import bytechr, tobytes, tostr import fontTools.feaLib.ast as ast import logging import os import re log = logging.getLogger(__name__) class Parser(object): """Initializes a Parser object. Example: .. 
code:: python from fontTools.feaLib.parser import Parser parser = Parser(file, font.getReverseGlyphMap()) parsetree = parser.parse() Note: the ``glyphNames`` iterable serves a double role to help distinguish glyph names from ranges in the presence of hyphens and to ensure that glyph names referenced in a feature file are actually part of a font's glyph set. If the iterable is left empty, no glyph name in glyph set checking takes place, and all glyph tokens containing hyphens are treated as literal glyph names, not as ranges. (Adding a space around the hyphen can, in any case, help to disambiguate ranges from glyph names containing hyphens.) By default, the parser will follow ``include()`` statements in the feature file. To turn this off, pass ``followIncludes=False``. Pass a directory string as ``includeDir`` to explicitly declare a directory to search included feature files in. """ extensions = {} ast = ast SS_FEATURE_TAGS = {"ss%02d" % i for i in range(1, 20 + 1)} CV_FEATURE_TAGS = {"cv%02d" % i for i in range(1, 99 + 1)} def __init__( self, featurefile, glyphNames=(), followIncludes=True, includeDir=None, **kwargs ): if "glyphMap" in kwargs: from fontTools.misc.loggingTools import deprecateArgument deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead") if glyphNames: raise TypeError( "'glyphNames' and (deprecated) 'glyphMap' are " "mutually exclusive" ) glyphNames = kwargs.pop("glyphMap") if kwargs: raise TypeError( "unsupported keyword argument%s: %s" % ("" if len(kwargs) == 1 else "s", ", ".join(repr(k) for k in kwargs)) ) self.glyphNames_ = set(glyphNames) self.doc_ = self.ast.FeatureFile() self.anchors_ = SymbolTable() self.glyphclasses_ = SymbolTable() self.lookups_ = SymbolTable() self.valuerecords_ = SymbolTable() self.symbol_tables_ = {self.anchors_, self.valuerecords_} self.next_token_type_, self.next_token_ = (None, None) self.cur_comments_ = [] self.next_token_location_ = None lexerClass = IncludingLexer if followIncludes else NonIncludingLexer self.lexer_ = lexerClass(featurefile, includeDir=includeDir) self.missing = {} self.advance_lexer_(comments=True) def parse(self): """Parse the file, and return a :class:`fontTools.feaLib.ast.FeatureFile` object representing the root of the abstract syntax tree containing the parsed contents of the file.""" statements = self.doc_.statements while self.next_token_type_ is not None or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.is_cur_keyword_("include"): statements.append(self.parse_include_()) elif self.cur_token_type_ is Lexer.GLYPHCLASS: statements.append(self.parse_glyphclass_definition_()) elif self.is_cur_keyword_(("anon", "anonymous")): statements.append(self.parse_anonymous_()) elif self.is_cur_keyword_("anchorDef"): statements.append(self.parse_anchordef_()) elif self.is_cur_keyword_("languagesystem"): statements.append(self.parse_languagesystem_()) elif self.is_cur_keyword_("lookup"): statements.append(self.parse_lookup_(vertical=False)) elif self.is_cur_keyword_("markClass"): statements.append(self.parse_markClass_()) elif self.is_cur_keyword_("feature"): statements.append(self.parse_feature_block_()) elif self.is_cur_keyword_("conditionset"): statements.append(self.parse_conditionset_()) elif self.is_cur_keyword_("variation"): statements.append(self.parse_feature_block_(variation=True)) elif self.is_cur_keyword_("table"): statements.append(self.parse_table_()) 
elif self.is_cur_keyword_("valueRecordDef"): statements.append(self.parse_valuerecord_definition_(vertical=False)) elif ( self.cur_token_type_ is Lexer.NAME and self.cur_token_ in self.extensions ): statements.append(self.extensions[self.cur_token_](self)) elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";": continue else: raise FeatureLibError( "Expected feature, languagesystem, lookup, markClass, " 'table, or glyph class definition, got {} "{}"'.format( self.cur_token_type_, self.cur_token_ ), self.cur_token_location_, ) # Report any missing glyphs at the end of parsing if self.missing: error = [ " %s (first found at %s)" % (name, loc) for name, loc in self.missing.items() ] raise FeatureLibError( "The following glyph names are referenced but are missing from the " "glyph set:\n" + ("\n".join(error)), None, ) return self.doc_ def parse_anchor_(self): # Parses an anchor in any of the four formats given in the feature # file specification (2.e.vii). self.expect_symbol_("<") self.expect_keyword_("anchor") location = self.cur_token_location_ if self.next_token_ == "NULL": # Format D self.expect_keyword_("NULL") self.expect_symbol_(">") return None if self.next_token_type_ == Lexer.NAME: # Format E name = self.expect_name_() anchordef = self.anchors_.resolve(name) if anchordef is None: raise FeatureLibError( 'Unknown anchor "%s"' % name, self.cur_token_location_ ) self.expect_symbol_(">") return self.ast.Anchor( anchordef.x, anchordef.y, name=name, contourpoint=anchordef.contourpoint, xDeviceTable=None, yDeviceTable=None, location=location, ) x, y = self.expect_number_(variable=True), self.expect_number_(variable=True) contourpoint = None if self.next_token_ == "contourpoint": # Format B self.expect_keyword_("contourpoint") contourpoint = self.expect_number_() if self.next_token_ == "<": # Format C xDeviceTable = self.parse_device_() yDeviceTable = self.parse_device_() else: xDeviceTable, yDeviceTable = None, None self.expect_symbol_(">") return self.ast.Anchor( x, y, name=None, contourpoint=contourpoint, xDeviceTable=xDeviceTable, yDeviceTable=yDeviceTable, location=location, ) def parse_anchor_marks_(self): # Parses a sequence of ``[<anchor> mark @MARKCLASS]*.`` anchorMarks = [] # [(self.ast.Anchor, markClassName)*] while self.next_token_ == "<": anchor = self.parse_anchor_() if anchor is None and self.next_token_ != "mark": continue # <anchor NULL> without mark, eg. in GPOS type 5 self.expect_keyword_("mark") markClass = self.expect_markClass_reference_() anchorMarks.append((anchor, markClass)) return anchorMarks def parse_anchordef_(self): # Parses a named anchor definition (`section 2.e.viii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.vii>`_). assert self.is_cur_keyword_("anchorDef") location = self.cur_token_location_ x, y = self.expect_number_(), self.expect_number_() contourpoint = None if self.next_token_ == "contourpoint": self.expect_keyword_("contourpoint") contourpoint = self.expect_number_() name = self.expect_name_() self.expect_symbol_(";") anchordef = self.ast.AnchorDefinition( name, x, y, contourpoint=contourpoint, location=location ) self.anchors_.define(name, anchordef) return anchordef def parse_anonymous_(self): # Parses an anonymous data block (`section 10 <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#10>`_). 
assert self.is_cur_keyword_(("anon", "anonymous")) tag = self.expect_tag_() _, content, location = self.lexer_.scan_anonymous_block(tag) self.advance_lexer_() self.expect_symbol_("}") end_tag = self.expect_tag_() assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()" self.expect_symbol_(";") return self.ast.AnonymousBlock(tag, content, location=location) def parse_attach_(self): # Parses a GDEF Attach statement (`section 9.b <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.b>`_) assert self.is_cur_keyword_("Attach") location = self.cur_token_location_ glyphs = self.parse_glyphclass_(accept_glyphname=True) contourPoints = {self.expect_number_()} while self.next_token_ != ";": contourPoints.add(self.expect_number_()) self.expect_symbol_(";") return self.ast.AttachStatement(glyphs, contourPoints, location=location) def parse_enumerate_(self, vertical): # Parse an enumerated pair positioning rule (`section 6.b.ii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#6.b.ii>`_). assert self.cur_token_ in {"enumerate", "enum"} self.advance_lexer_() return self.parse_position_(enumerated=True, vertical=vertical) def parse_GlyphClassDef_(self): # Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;' assert self.is_cur_keyword_("GlyphClassDef") location = self.cur_token_location_ if self.next_token_ != ",": baseGlyphs = self.parse_glyphclass_(accept_glyphname=False) else: baseGlyphs = None self.expect_symbol_(",") if self.next_token_ != ",": ligatureGlyphs = self.parse_glyphclass_(accept_glyphname=False) else: ligatureGlyphs = None self.expect_symbol_(",") if self.next_token_ != ",": markGlyphs = self.parse_glyphclass_(accept_glyphname=False) else: markGlyphs = None self.expect_symbol_(",") if self.next_token_ != ";": componentGlyphs = self.parse_glyphclass_(accept_glyphname=False) else: componentGlyphs = None self.expect_symbol_(";") return self.ast.GlyphClassDefStatement( baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=location ) def parse_glyphclass_definition_(self): # Parses glyph class definitions such as '@UPPERCASE = [A-Z];' location, name = self.cur_token_location_, self.cur_token_ self.expect_symbol_("=") glyphs = self.parse_glyphclass_(accept_glyphname=False) self.expect_symbol_(";") glyphclass = self.ast.GlyphClassDefinition(name, glyphs, location=location) self.glyphclasses_.define(name, glyphclass) return glyphclass def split_glyph_range_(self, name, location): # Since v1.20, the OpenType Feature File specification allows # for dashes in glyph names. A sequence like "a-b-c-d" could # therefore mean a single glyph whose name happens to be # "a-b-c-d", or it could mean a range from glyph "a" to glyph # "b-c-d", or a range from glyph "a-b" to glyph "c-d", or a # range from glyph "a-b-c" to glyph "d". Technically, this # example could be resolved because the (pretty complex) # definition of glyph ranges renders most of these splits # invalid. But the specification does not say that a compiler # should try to apply such fancy heuristics. To encourage # unambiguous feature files, we therefore try all possible # splits and reject the feature file if there are multiple # splits possible. It is intentional that we don't just emit a # warning; warnings tend to get ignored. To fix the problem, # font designers can trivially add spaces around the intended # split point, and we emit a compiler error that suggests # how exactly the source should be rewritten to make things # unambiguous.
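# For example (a hypothetical glyph set): if the font contains glyphs
# "a", "a-b", "b-c-d" and "c-d", then the token "a-b-c-d" admits two
# splits ("a" to "b-c-d" and "a-b" to "c-d"), so we raise an error and
# suggest writing e.g. "a-b - c-d" to force the intended split.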
parts = name.split("-") solutions = [] for i in range(len(parts)): start, limit = "-".join(parts[0:i]), "-".join(parts[i:]) if start in self.glyphNames_ and limit in self.glyphNames_: solutions.append((start, limit)) if len(solutions) == 1: start, limit = solutions[0] return start, limit elif len(solutions) == 0: raise FeatureLibError( '"%s" is not a glyph in the font, and it can not be split ' "into a range of known glyphs" % name, location, ) else: ranges = " or ".join(['"%s - %s"' % (s, l) for s, l in solutions]) raise FeatureLibError( 'Ambiguous glyph range "%s"; ' "please use %s to clarify what you mean" % (name, ranges), location, ) def parse_glyphclass_(self, accept_glyphname, accept_null=False): # Parses a glyph class, either named or anonymous, or (if # ``bool(accept_glyphname)``) a glyph name. If ``bool(accept_null)`` then # also accept the special NULL glyph. if accept_glyphname and self.next_token_type_ in (Lexer.NAME, Lexer.CID): if accept_null and self.next_token_ == "NULL": # If you want a glyph called NULL, you should escape it. self.advance_lexer_() return self.ast.NullGlyph(location=self.cur_token_location_) glyph = self.expect_glyph_() self.check_glyph_name_in_glyph_set(glyph) return self.ast.GlyphName(glyph, location=self.cur_token_location_) if self.next_token_type_ is Lexer.GLYPHCLASS: self.advance_lexer_() gc = self.glyphclasses_.resolve(self.cur_token_) if gc is None: raise FeatureLibError( "Unknown glyph class @%s" % self.cur_token_, self.cur_token_location_, ) if isinstance(gc, self.ast.MarkClass): return self.ast.MarkClassName(gc, location=self.cur_token_location_) else: return self.ast.GlyphClassName(gc, location=self.cur_token_location_) self.expect_symbol_("[") location = self.cur_token_location_ glyphs = self.ast.GlyphClass(location=location) while self.next_token_ != "]": if self.next_token_type_ is Lexer.NAME: glyph = self.expect_glyph_() location = self.cur_token_location_ if "-" in glyph and self.glyphNames_ and glyph not in self.glyphNames_: start, limit = self.split_glyph_range_(glyph, location) self.check_glyph_name_in_glyph_set(start, limit) glyphs.add_range( start, limit, self.make_glyph_range_(location, start, limit) ) elif self.next_token_ == "-": start = glyph self.expect_symbol_("-") limit = self.expect_glyph_() self.check_glyph_name_in_glyph_set(start, limit) glyphs.add_range( start, limit, self.make_glyph_range_(location, start, limit) ) else: if "-" in glyph and not self.glyphNames_: log.warning( str( FeatureLibError( f"Ambiguous glyph name that looks like a range: {glyph!r}", location, ) ) ) self.check_glyph_name_in_glyph_set(glyph) glyphs.append(glyph) elif self.next_token_type_ is Lexer.CID: glyph = self.expect_glyph_() if self.next_token_ == "-": range_location = self.cur_token_location_ range_start = self.cur_token_ self.expect_symbol_("-") range_end = self.expect_cid_() self.check_glyph_name_in_glyph_set( f"cid{range_start:05d}", f"cid{range_end:05d}", ) glyphs.add_cid_range( range_start, range_end, self.make_cid_range_(range_location, range_start, range_end), ) else: glyph_name = f"cid{self.cur_token_:05d}" self.check_glyph_name_in_glyph_set(glyph_name) glyphs.append(glyph_name) elif self.next_token_type_ is Lexer.GLYPHCLASS: self.advance_lexer_() gc = self.glyphclasses_.resolve(self.cur_token_) if gc is None: raise FeatureLibError( "Unknown glyph class @%s" % self.cur_token_, self.cur_token_location_, ) if isinstance(gc, self.ast.MarkClass): gc = self.ast.MarkClassName(gc, location=self.cur_token_location_) else: gc = 
self.ast.GlyphClassName(gc, location=self.cur_token_location_) glyphs.add_class(gc) else: raise FeatureLibError( "Expected glyph name, glyph range, " f"or glyph class reference, found {self.next_token_!r}", self.next_token_location_, ) self.expect_symbol_("]") return glyphs def parse_glyph_pattern_(self, vertical): # Parses a glyph pattern, including lookups and context, e.g.:: # # a b # a b c' d e # a b c' lookup ChangeC d e prefix, glyphs, lookups, values, suffix = ([], [], [], [], []) hasMarks = False while self.next_token_ not in {"by", "from", ";", ","}: gc = self.parse_glyphclass_(accept_glyphname=True) marked = False if self.next_token_ == "'": self.expect_symbol_("'") hasMarks = marked = True if marked: if suffix: # makeotf also reports this as an error, while FontForge # silently inserts ' in all the intervening glyphs. # https://github.com/fonttools/fonttools/pull/1096 raise FeatureLibError( "Unsupported contextual target sequence: at most " "one run of marked (') glyph/class names allowed", self.cur_token_location_, ) glyphs.append(gc) elif glyphs: suffix.append(gc) else: prefix.append(gc) if self.is_next_value_(): values.append(self.parse_valuerecord_(vertical)) else: values.append(None) lookuplist = None while self.next_token_ == "lookup": if lookuplist is None: lookuplist = [] self.expect_keyword_("lookup") if not marked: raise FeatureLibError( "Lookups can only follow marked glyphs", self.cur_token_location_, ) lookup_name = self.expect_name_() lookup = self.lookups_.resolve(lookup_name) if lookup is None: raise FeatureLibError( 'Unknown lookup "%s"' % lookup_name, self.cur_token_location_ ) lookuplist.append(lookup) if marked: lookups.append(lookuplist) if not glyphs and not suffix: # e.g., "sub f f i by" assert lookups == [] return ([], prefix, [None] * len(prefix), values, [], hasMarks) else: if any(values[: len(prefix)]): raise FeatureLibError( "Positioning cannot be applied in the backtrack glyph sequence, " "before the marked glyph sequence.", self.cur_token_location_, ) marked_values = values[len(prefix) : len(prefix) + len(glyphs)] if any(marked_values): if any(values[len(prefix) + len(glyphs) :]): raise FeatureLibError( "Positioning values are allowed only in the marked glyph " "sequence, or after the final glyph node when only one glyph " "node is marked.", self.cur_token_location_, ) values = marked_values elif values and values[-1]: if len(glyphs) > 1 or any(values[:-1]): raise FeatureLibError( "Positioning values are allowed only in the marked glyph " "sequence, or after the final glyph node when only one glyph " "node is marked.", self.cur_token_location_, ) values = values[-1:] elif any(values): raise FeatureLibError( "Positioning values are allowed only in the marked glyph " "sequence, or after the final glyph node when only one glyph " "node is marked.", self.cur_token_location_, ) return (prefix, glyphs, lookups, values, suffix, hasMarks) def parse_ignore_glyph_pattern_(self, sub): location = self.cur_token_location_ prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_( vertical=False ) if any(lookups): raise FeatureLibError( f'No lookups can be specified for "ignore {sub}"', location ) if not hasMarks: error = FeatureLibError( f'Ambiguous "ignore {sub}", there should be at least one marked glyph', location, ) log.warning(str(error)) suffix, glyphs = glyphs[1:], glyphs[0:1] chainContext = (prefix, glyphs, suffix) return chainContext def parse_ignore_context_(self, sub): location = self.cur_token_location_ chainContext =
[self.parse_ignore_glyph_pattern_(sub)] while self.next_token_ == ",": self.expect_symbol_(",") chainContext.append(self.parse_ignore_glyph_pattern_(sub)) self.expect_symbol_(";") return chainContext def parse_ignore_(self): # Parses an ignore sub/pos rule. assert self.is_cur_keyword_("ignore") location = self.cur_token_location_ self.advance_lexer_() if self.cur_token_ in ["substitute", "sub"]: chainContext = self.parse_ignore_context_("sub") return self.ast.IgnoreSubstStatement(chainContext, location=location) if self.cur_token_ in ["position", "pos"]: chainContext = self.parse_ignore_context_("pos") return self.ast.IgnorePosStatement(chainContext, location=location) raise FeatureLibError( 'Expected "substitute" or "position"', self.cur_token_location_ ) def parse_include_(self): assert self.cur_token_ == "include" location = self.cur_token_location_ filename = self.expect_filename_() # self.expect_symbol_(";") return ast.IncludeStatement(filename, location=location) def parse_language_(self): assert self.is_cur_keyword_("language") location = self.cur_token_location_ language = self.expect_language_tag_() include_default, required = (True, False) if self.next_token_ in {"exclude_dflt", "include_dflt"}: include_default = self.expect_name_() == "include_dflt" if self.next_token_ == "required": self.expect_keyword_("required") required = True self.expect_symbol_(";") return self.ast.LanguageStatement( language, include_default, required, location=location ) def parse_ligatureCaretByIndex_(self): assert self.is_cur_keyword_("LigatureCaretByIndex") location = self.cur_token_location_ glyphs = self.parse_glyphclass_(accept_glyphname=True) carets = [self.expect_number_()] while self.next_token_ != ";": carets.append(self.expect_number_()) self.expect_symbol_(";") return self.ast.LigatureCaretByIndexStatement(glyphs, carets, location=location) def parse_ligatureCaretByPos_(self): assert self.is_cur_keyword_("LigatureCaretByPos") location = self.cur_token_location_ glyphs = self.parse_glyphclass_(accept_glyphname=True) carets = [self.expect_number_(variable=True)] while self.next_token_ != ";": carets.append(self.expect_number_(variable=True)) self.expect_symbol_(";") return self.ast.LigatureCaretByPosStatement(glyphs, carets, location=location) def parse_lookup_(self, vertical): # Parses a ``lookup`` - either a lookup block, or a lookup reference # inside a feature. assert self.is_cur_keyword_("lookup") location, name = self.cur_token_location_, self.expect_name_() if self.next_token_ == ";": lookup = self.lookups_.resolve(name) if lookup is None: raise FeatureLibError( 'Unknown lookup "%s"' % name, self.cur_token_location_ ) self.expect_symbol_(";") return self.ast.LookupReferenceStatement(lookup, location=location) use_extension = False if self.next_token_ == "useExtension": self.expect_keyword_("useExtension") use_extension = True block = self.ast.LookupBlock(name, use_extension, location=location) self.parse_block_(block, vertical) self.lookups_.define(name, block) return block def parse_lookupflag_(self): # Parses a ``lookupflag`` statement, either specified by number or # in words. 
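# For reference, the named flags simply OR together into the LookupFlag
# bitfield, so e.g. "lookupflag RightToLeft IgnoreMarks;" is equivalent
# to "lookupflag 9;" (1 | 8); a MarkAttachmentType class additionally
# lands in the high byte (class << 8), as set_lookup_flag in builder.py
# (earlier in this archive) shows.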
assert self.is_cur_keyword_("lookupflag") location = self.cur_token_location_ # format B: "lookupflag 6;" if self.next_token_type_ == Lexer.NUMBER: value = self.expect_number_() self.expect_symbol_(";") return self.ast.LookupFlagStatement(value, location=location) # format A: "lookupflag RightToLeft MarkAttachmentType @M;" value_seen = False value, markAttachment, markFilteringSet = 0, None, None flags = { "RightToLeft": 1, "IgnoreBaseGlyphs": 2, "IgnoreLigatures": 4, "IgnoreMarks": 8, } seen = set() while self.next_token_ != ";": if self.next_token_ in seen: raise FeatureLibError( "%s can be specified only once" % self.next_token_, self.next_token_location_, ) seen.add(self.next_token_) if self.next_token_ == "MarkAttachmentType": self.expect_keyword_("MarkAttachmentType") markAttachment = self.parse_glyphclass_(accept_glyphname=False) elif self.next_token_ == "UseMarkFilteringSet": self.expect_keyword_("UseMarkFilteringSet") markFilteringSet = self.parse_glyphclass_(accept_glyphname=False) elif self.next_token_ in flags: value_seen = True value = value | flags[self.expect_name_()] else: raise FeatureLibError( '"%s" is not a recognized lookupflag' % self.next_token_, self.next_token_location_, ) self.expect_symbol_(";") if not any([value_seen, markAttachment, markFilteringSet]): raise FeatureLibError( "lookupflag must have a value", self.next_token_location_ ) return self.ast.LookupFlagStatement( value, markAttachment=markAttachment, markFilteringSet=markFilteringSet, location=location, ) def parse_markClass_(self): assert self.is_cur_keyword_("markClass") location = self.cur_token_location_ glyphs = self.parse_glyphclass_(accept_glyphname=True) if not glyphs.glyphSet(): raise FeatureLibError( "Empty glyph class in mark class definition", location ) anchor = self.parse_anchor_() name = self.expect_class_name_() self.expect_symbol_(";") markClass = self.doc_.markClasses.get(name) if markClass is None: markClass = self.ast.MarkClass(name) self.doc_.markClasses[name] = markClass self.glyphclasses_.define(name, markClass) mcdef = self.ast.MarkClassDefinition( markClass, anchor, glyphs, location=location ) markClass.addDefinition(mcdef) return mcdef def parse_position_(self, enumerated, vertical): assert self.cur_token_ in {"position", "pos"} if self.next_token_ == "cursive": # GPOS type 3 return self.parse_position_cursive_(enumerated, vertical) elif self.next_token_ == "base": # GPOS type 4 return self.parse_position_base_(enumerated, vertical) elif self.next_token_ == "ligature": # GPOS type 5 return self.parse_position_ligature_(enumerated, vertical) elif self.next_token_ == "mark": # GPOS type 6 return self.parse_position_mark_(enumerated, vertical) location = self.cur_token_location_ prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_( vertical ) self.expect_symbol_(";") if any(lookups): # GPOS type 8: Chaining contextual positioning; explicit lookups if any(values): raise FeatureLibError( 'If "lookup" is present, no values must be specified', location ) return self.ast.ChainContextPosStatement( prefix, glyphs, suffix, lookups, location=location ) # Pair positioning, format A: "pos V 10 A -10;" # Pair positioning, format B: "pos V A -20;" if not prefix and not suffix and len(glyphs) == 2 and not hasMarks: if values[0] is None: # Format B: "pos V A -20;" values.reverse() return self.ast.PairPosStatement( glyphs[0], values[0], glyphs[1], values[1], enumerated=enumerated, location=location, ) if enumerated: raise FeatureLibError( '"enumerate" is only allowed with 
pair positionings', location ) return self.ast.SinglePosStatement( list(zip(glyphs, values)), prefix, suffix, forceChain=hasMarks, location=location, ) def parse_position_cursive_(self, enumerated, vertical): location = self.cur_token_location_ self.expect_keyword_("cursive") if enumerated: raise FeatureLibError( '"enumerate" is not allowed with ' "cursive attachment positioning", location, ) glyphclass = self.parse_glyphclass_(accept_glyphname=True) entryAnchor = self.parse_anchor_() exitAnchor = self.parse_anchor_() self.expect_symbol_(";") return self.ast.CursivePosStatement( glyphclass, entryAnchor, exitAnchor, location=location ) def parse_position_base_(self, enumerated, vertical): location = self.cur_token_location_ self.expect_keyword_("base") if enumerated: raise FeatureLibError( '"enumerate" is not allowed with ' "mark-to-base attachment positioning", location, ) base = self.parse_glyphclass_(accept_glyphname=True) marks = self.parse_anchor_marks_() self.expect_symbol_(";") return self.ast.MarkBasePosStatement(base, marks, location=location) def parse_position_ligature_(self, enumerated, vertical): location = self.cur_token_location_ self.expect_keyword_("ligature") if enumerated: raise FeatureLibError( '"enumerate" is not allowed with ' "mark-to-ligature attachment positioning", location, ) ligatures = self.parse_glyphclass_(accept_glyphname=True) marks = [self.parse_anchor_marks_()] while self.next_token_ == "ligComponent": self.expect_keyword_("ligComponent") marks.append(self.parse_anchor_marks_()) self.expect_symbol_(";") return self.ast.MarkLigPosStatement(ligatures, marks, location=location) def parse_position_mark_(self, enumerated, vertical): location = self.cur_token_location_ self.expect_keyword_("mark") if enumerated: raise FeatureLibError( '"enumerate" is not allowed with ' "mark-to-mark attachment positioning", location, ) baseMarks = self.parse_glyphclass_(accept_glyphname=True) marks = self.parse_anchor_marks_() self.expect_symbol_(";") return self.ast.MarkMarkPosStatement(baseMarks, marks, location=location) def parse_script_(self): assert self.is_cur_keyword_("script") location, script = self.cur_token_location_, self.expect_script_tag_() self.expect_symbol_(";") return self.ast.ScriptStatement(script, location=location) def parse_substitute_(self): assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"} location = self.cur_token_location_ reverse = self.cur_token_ in {"reversesub", "rsub"} ( old_prefix, old, lookups, values, old_suffix, hasMarks, ) = self.parse_glyph_pattern_(vertical=False) if any(values): raise FeatureLibError( "Substitution statements cannot contain values", location ) new = [] if self.next_token_ == "by": keyword = self.expect_keyword_("by") while self.next_token_ != ";": gc = self.parse_glyphclass_(accept_glyphname=True, accept_null=True) new.append(gc) elif self.next_token_ == "from": keyword = self.expect_keyword_("from") new = [self.parse_glyphclass_(accept_glyphname=False)] else: keyword = None self.expect_symbol_(";") if len(new) == 0 and not any(lookups): raise FeatureLibError( 'Expected "by", "from" or explicit lookup references', self.cur_token_location_, ) # GSUB lookup type 3: Alternate substitution. 
# Format: "substitute a from [a.1 a.2 a.3];" if keyword == "from": if reverse: raise FeatureLibError( 'Reverse chaining substitutions do not support "from"', location ) if len(old) != 1 or len(old[0].glyphSet()) != 1: raise FeatureLibError('Expected a single glyph before "from"', location) if len(new) != 1: raise FeatureLibError( 'Expected a single glyphclass after "from"', location ) return self.ast.AlternateSubstStatement( old_prefix, old[0], old_suffix, new[0], location=location ) num_lookups = len([l for l in lookups if l is not None]) is_deletion = False if len(new) == 1 and isinstance(new[0], ast.NullGlyph): new = [] # Deletion is_deletion = True # GSUB lookup type 1: Single substitution. # Format A: "substitute a by a.sc;" # Format B: "substitute [one.fitted one.oldstyle] by one;" # Format C: "substitute [a-d] by [A.sc-D.sc];" if not reverse and len(old) == 1 and len(new) == 1 and num_lookups == 0: glyphs = list(old[0].glyphSet()) replacements = list(new[0].glyphSet()) if len(replacements) == 1: replacements = replacements * len(glyphs) if len(glyphs) != len(replacements): raise FeatureLibError( 'Expected a glyph class with %d elements after "by", ' "but found a glyph class with %d elements" % (len(glyphs), len(replacements)), location, ) return self.ast.SingleSubstStatement( old, new, old_prefix, old_suffix, forceChain=hasMarks, location=location ) # Glyph deletion, built as GSUB lookup type 2: Multiple substitution # with empty replacement. if is_deletion and len(old) == 1 and num_lookups == 0: return self.ast.MultipleSubstStatement( old_prefix, old[0], old_suffix, (), forceChain=hasMarks, location=location, ) # GSUB lookup type 2: Multiple substitution. # Format: "substitute f_f_i by f f i;" # # GlyphsApp introduces two additional formats: # Format 1: "substitute [f_i f_l] by [f f] [i l];" # Format 2: "substitute [f_i f_l] by f [i l];" # http://handbook.glyphsapp.com/en/layout/multiple-substitution-with-classes/ if not reverse and len(old) == 1 and len(new) > 1 and num_lookups == 0: count = len(old[0].glyphSet()) for n in new: if not list(n.glyphSet()): raise FeatureLibError("Empty class in replacement", location) if len(n.glyphSet()) != 1 and len(n.glyphSet()) != count: raise FeatureLibError( f'Expected a glyph class with 1 or {count} elements after "by", ' f"but found a glyph class with {len(n.glyphSet())} elements", location, ) return self.ast.MultipleSubstStatement( old_prefix, old[0], old_suffix, new, forceChain=hasMarks, location=location, ) # GSUB lookup type 4: Ligature substitution. # Format: "substitute f f i by f_f_i;" if ( not reverse and len(old) > 1 and len(new) == 1 and len(new[0].glyphSet()) == 1 and num_lookups == 0 ): return self.ast.LigatureSubstStatement( old_prefix, old, old_suffix, list(new[0].glyphSet())[0], forceChain=hasMarks, location=location, ) # GSUB lookup type 8: Reverse chaining substitution. 
if reverse: if len(old) != 1: raise FeatureLibError( "In reverse chaining single substitutions, " "only a single glyph or glyph class can be replaced", location, ) if len(new) != 1: raise FeatureLibError( "In reverse chaining single substitutions, " 'the replacement (after "by") must be a single glyph ' "or glyph class", location, ) if num_lookups != 0: raise FeatureLibError( "Reverse chaining substitutions cannot call named lookups", location ) glyphs = sorted(list(old[0].glyphSet())) replacements = sorted(list(new[0].glyphSet())) if len(replacements) == 1: replacements = replacements * len(glyphs) if len(glyphs) != len(replacements): raise FeatureLibError( 'Expected a glyph class with %d elements after "by", ' "but found a glyph class with %d elements" % (len(glyphs), len(replacements)), location, ) return self.ast.ReverseChainSingleSubstStatement( old_prefix, old_suffix, old, new, location=location ) if len(old) > 1 and len(new) > 1: raise FeatureLibError( "Direct substitution of multiple glyphs by multiple glyphs " "is not supported", location, ) # If there are remaining glyphs to parse, this is an invalid GSUB statement if len(new) != 0 or is_deletion: raise FeatureLibError("Invalid substitution statement", location) # GSUB lookup type 6: Chaining contextual substitution. rule = self.ast.ChainContextSubstStatement( old_prefix, old, old_suffix, lookups, location=location ) return rule def parse_subtable_(self): assert self.is_cur_keyword_("subtable") location = self.cur_token_location_ self.expect_symbol_(";") return self.ast.SubtableStatement(location=location) def parse_size_parameters_(self): # Parses a ``parameters`` statement used in ``size`` features. See # `section 8.b <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.b>`_. 
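# Reverse chaining single substitution (GSUB type 8), the `if reverse:`
# branch below: exactly one (optionally marked) glyph or class may be
# replaced, the replacement after "by" must be a single glyph or a class of
# matching length, and named lookups are not allowed. Minimal sketch with
# hypothetical glyph names:
from io import StringIO
from fontTools.feaLib.parser import Parser

fea = """
feature rlig {
    rsub [a e n] d' by d.alt;
} rlig;
"""
rule = Parser(StringIO(fea)).parse().statements[0].statements[0]
print(type(rule).__name__)  # ReverseChainSingleSubstStatement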
assert self.is_cur_keyword_("parameters") location = self.cur_token_location_ DesignSize = self.expect_decipoint_() SubfamilyID = self.expect_number_() RangeStart = 0.0 RangeEnd = 0.0 if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or SubfamilyID != 0: RangeStart = self.expect_decipoint_() RangeEnd = self.expect_decipoint_() self.expect_symbol_(";") return self.ast.SizeParameters( DesignSize, SubfamilyID, RangeStart, RangeEnd, location=location ) def parse_size_menuname_(self): assert self.is_cur_keyword_("sizemenuname") location = self.cur_token_location_ platformID, platEncID, langID, string = self.parse_name_() return self.ast.FeatureNameStatement( "size", platformID, platEncID, langID, string, location=location ) def parse_table_(self): assert self.is_cur_keyword_("table") location, name = self.cur_token_location_, self.expect_tag_() table = self.ast.TableBlock(name, location=location) self.expect_symbol_("{") handler = { "GDEF": self.parse_table_GDEF_, "head": self.parse_table_head_, "hhea": self.parse_table_hhea_, "vhea": self.parse_table_vhea_, "name": self.parse_table_name_, "BASE": self.parse_table_BASE_, "OS/2": self.parse_table_OS_2_, "STAT": self.parse_table_STAT_, }.get(name) if handler: handler(table) else: raise FeatureLibError( '"table %s" is not supported' % name.strip(), location ) self.expect_symbol_("}") end_tag = self.expect_tag_() if end_tag != name: raise FeatureLibError( 'Expected "%s"' % name.strip(), self.cur_token_location_ ) self.expect_symbol_(";") return table def parse_table_GDEF_(self, table): statements = table.statements while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.is_cur_keyword_("Attach"): statements.append(self.parse_attach_()) elif self.is_cur_keyword_("GlyphClassDef"): statements.append(self.parse_GlyphClassDef_()) elif self.is_cur_keyword_("LigatureCaretByIndex"): statements.append(self.parse_ligatureCaretByIndex_()) elif self.is_cur_keyword_("LigatureCaretByPos"): statements.append(self.parse_ligatureCaretByPos_()) elif self.cur_token_ == ";": continue else: raise FeatureLibError( "Expected Attach, LigatureCaretByIndex, " "or LigatureCaretByPos", self.cur_token_location_, ) def parse_table_head_(self, table): statements = table.statements while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.is_cur_keyword_("FontRevision"): statements.append(self.parse_FontRevision_()) elif self.cur_token_ == ";": continue else: raise FeatureLibError("Expected FontRevision", self.cur_token_location_) def parse_table_hhea_(self, table): statements = table.statements fields = ("CaretOffset", "Ascender", "Descender", "LineGap") while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: key = self.cur_token_.lower() value = self.expect_number_() statements.append( self.ast.HheaField(key, value, location=self.cur_token_location_) ) if self.next_token_ != ";": raise FeatureLibError( "Incomplete statement", self.next_token_location_ ) elif self.cur_token_ == ";": continue else: raise 
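# parse_table_ (above) dispatches on the table tag and requires the closing
# tag to repeat the opening one. A minimal hhea override as accepted by
# parse_table_hhea_:
from io import StringIO
from fontTools.feaLib.parser import Parser

fea = """
table hhea {
    CaretOffset 50;
    Ascender 800;
    Descender -200;
    LineGap 200;
} hhea;
"""
print(Parser(StringIO(fea)).parse().statements[0].asFea())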
FeatureLibError( "Expected CaretOffset, Ascender, " "Descender or LineGap", self.cur_token_location_, ) def parse_table_vhea_(self, table): statements = table.statements fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: key = self.cur_token_.lower() value = self.expect_number_() statements.append( self.ast.VheaField(key, value, location=self.cur_token_location_) ) if self.next_token_ != ";": raise FeatureLibError( "Incomplete statement", self.next_token_location_ ) elif self.cur_token_ == ";": continue else: raise FeatureLibError( "Expected VertTypoAscender, " "VertTypoDescender or VertTypoLineGap", self.cur_token_location_, ) def parse_table_name_(self, table): statements = table.statements while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.is_cur_keyword_("nameid"): statement = self.parse_nameid_() if statement: statements.append(statement) elif self.cur_token_ == ";": continue else: raise FeatureLibError("Expected nameid", self.cur_token_location_) def parse_name_(self): """Parses a name record. See `section 9.e <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.e>`_.""" platEncID = None langID = None if self.next_token_type_ in Lexer.NUMBERS: platformID = self.expect_any_number_() location = self.cur_token_location_ if platformID not in (1, 3): raise FeatureLibError("Expected platform id 1 or 3", location) if self.next_token_type_ in Lexer.NUMBERS: platEncID = self.expect_any_number_() langID = self.expect_any_number_() else: platformID = 3 location = self.cur_token_location_ if platformID == 1: # Macintosh platEncID = platEncID or 0 # Roman langID = langID or 0 # English else: # 3, Windows platEncID = platEncID or 1 # Unicode langID = langID or 0x0409 # English string = self.expect_string_() self.expect_symbol_(";") encoding = getEncoding(platformID, platEncID, langID) if encoding is None: raise FeatureLibError("Unsupported encoding", location) unescaped = self.unescape_string_(string, encoding) return platformID, platEncID, langID, unescaped def parse_stat_name_(self): platEncID = None langID = None if self.next_token_type_ in Lexer.NUMBERS: platformID = self.expect_any_number_() location = self.cur_token_location_ if platformID not in (1, 3): raise FeatureLibError("Expected platform id 1 or 3", location) if self.next_token_type_ in Lexer.NUMBERS: platEncID = self.expect_any_number_() langID = self.expect_any_number_() else: platformID = 3 location = self.cur_token_location_ if platformID == 1: # Macintosh platEncID = platEncID or 0 # Roman langID = langID or 0 # English else: # 3, Windows platEncID = platEncID or 1 # Unicode langID = langID or 0x0409 # English string = self.expect_string_() encoding = getEncoding(platformID, platEncID, langID) if encoding is None: raise FeatureLibError("Unsupported encoding", location) unescaped = self.unescape_string_(string, encoding) return platformID, platEncID, langID, unescaped def parse_nameid_(self): assert self.cur_token_ == "nameid", self.cur_token_ location, nameID = self.cur_token_location_, self.expect_any_number_() if nameID 
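# parse_name_ (above) fills in spec-defined defaults when the platform,
# encoding and language ids are omitted: Windows (3) gets encoding 1
# (Unicode) and language 0x0409 (US English); Macintosh (1) gets encoding 0
# (Roman) and language 0 (English). The same defaulting, restated as a
# standalone helper for reference:
def name_record_defaults(platformID=3, platEncID=None, langID=None):
    if platformID == 1:  # Macintosh
        return platformID, platEncID or 0, langID or 0
    return platformID, platEncID or 1, langID or 0x0409  # Windows

assert name_record_defaults() == (3, 1, 0x0409)
assert name_record_defaults(1) == (1, 0, 0)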
> 32767: raise FeatureLibError( "Name id value cannot be greater than 32767", self.cur_token_location_ ) platformID, platEncID, langID, string = self.parse_name_() return self.ast.NameRecord( nameID, platformID, platEncID, langID, string, location=location ) def unescape_string_(self, string, encoding): if encoding == "utf_16_be": s = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string) else: unescape = lambda m: self.unescape_byte_(m, encoding) s = re.sub(r"\\[0-9a-fA-F]{2}", unescape, string) # We now have a Unicode string, but it might contain surrogate pairs. # We convert surrogates to actual Unicode by round-tripping through # Python's UTF-16 codec in a special mode. utf16 = tobytes(s, "utf_16_be", "surrogatepass") return tostr(utf16, "utf_16_be") @staticmethod def unescape_unichr_(match): n = match.group(0)[1:] return chr(int(n, 16)) @staticmethod def unescape_byte_(match, encoding): n = match.group(0)[1:] return bytechr(int(n, 16)).decode(encoding) def parse_table_BASE_(self, table): statements = table.statements while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.is_cur_keyword_("HorizAxis.BaseTagList"): horiz_bases = self.parse_base_tag_list_() elif self.is_cur_keyword_("HorizAxis.BaseScriptList"): horiz_scripts = self.parse_base_script_list_(len(horiz_bases)) statements.append( self.ast.BaseAxis( horiz_bases, horiz_scripts, False, location=self.cur_token_location_, ) ) elif self.is_cur_keyword_("VertAxis.BaseTagList"): vert_bases = self.parse_base_tag_list_() elif self.is_cur_keyword_("VertAxis.BaseScriptList"): vert_scripts = self.parse_base_script_list_(len(vert_bases)) statements.append( self.ast.BaseAxis( vert_bases, vert_scripts, True, location=self.cur_token_location_, ) ) elif self.cur_token_ == ";": continue def parse_table_OS_2_(self, table): statements = table.statements numbers = ( "FSType", "TypoAscender", "TypoDescender", "TypoLineGap", "winAscent", "winDescent", "XHeight", "CapHeight", "WeightClass", "WidthClass", "LowerOpSize", "UpperOpSize", ) ranges = ("UnicodeRange", "CodePageRange") while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.cur_token_type_ is Lexer.NAME: key = self.cur_token_.lower() value = None if self.cur_token_ in numbers: value = self.expect_number_() elif self.is_cur_keyword_("Panose"): value = [] for i in range(10): value.append(self.expect_number_()) elif self.cur_token_ in ranges: value = [] while self.next_token_ != ";": value.append(self.expect_number_()) elif self.is_cur_keyword_("Vendor"): value = self.expect_string_() statements.append( self.ast.OS2Field(key, value, location=self.cur_token_location_) ) elif self.cur_token_ == ";": continue def parse_STAT_ElidedFallbackName(self): assert self.is_cur_keyword_("ElidedFallbackName") self.expect_symbol_("{") names = [] while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_() if self.is_cur_keyword_("name"): platformID, platEncID, langID, string = self.parse_stat_name_() nameRecord = self.ast.STATNameStatement( "stat", platformID, platEncID, langID, string, location=self.cur_token_location_, ) names.append(nameRecord) else: if self.cur_token_ != ";": raise FeatureLibError( f"Unexpected token {self.cur_token_} " f"in 
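# unescape_string_ (above) first replaces \XXXX (Windows) or \XX (Mac)
# escapes, then repairs surrogate pairs by round-tripping through UTF-16BE
# with the "surrogatepass" error handler. A self-contained sketch of the
# Windows case:
import re

def unescape_windows_name(s):
    s = re.sub(r"\\[0-9a-fA-F]{4}", lambda m: chr(int(m.group(0)[1:], 16)), s)
    # merge any surrogate pairs (\D83D\DE00 is U+1F600) into real characters
    return s.encode("utf_16_be", "surrogatepass").decode("utf_16_be")

print(unescape_windows_name(r"grin \D83D\DE00"))  # grin 😀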
ElidedFallbackName", self.cur_token_location_, ) self.expect_symbol_("}") if not names: raise FeatureLibError('Expected "name"', self.cur_token_location_) return names def parse_STAT_design_axis(self): assert self.is_cur_keyword_("DesignAxis") names = [] axisTag = self.expect_tag_() if ( axisTag not in ("ital", "opsz", "slnt", "wdth", "wght") and not axisTag.isupper() ): log.warning(f"Unregistered axis tag {axisTag} should be uppercase.") axisOrder = self.expect_number_() self.expect_symbol_("{") while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_() if self.cur_token_type_ is Lexer.COMMENT: continue elif self.is_cur_keyword_("name"): location = self.cur_token_location_ platformID, platEncID, langID, string = self.parse_stat_name_() name = self.ast.STATNameStatement( "stat", platformID, platEncID, langID, string, location=location ) names.append(name) elif self.cur_token_ == ";": continue else: raise FeatureLibError( f'Expected "name", got {self.cur_token_}', self.cur_token_location_ ) self.expect_symbol_("}") return self.ast.STATDesignAxisStatement( axisTag, axisOrder, names, self.cur_token_location_ ) def parse_STAT_axis_value_(self): assert self.is_cur_keyword_("AxisValue") self.expect_symbol_("{") locations = [] names = [] flags = 0 while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: continue elif self.is_cur_keyword_("name"): location = self.cur_token_location_ platformID, platEncID, langID, string = self.parse_stat_name_() name = self.ast.STATNameStatement( "stat", platformID, platEncID, langID, string, location=location ) names.append(name) elif self.is_cur_keyword_("location"): location = self.parse_STAT_location() locations.append(location) elif self.is_cur_keyword_("flag"): flags = self.expect_stat_flags() elif self.cur_token_ == ";": continue else: raise FeatureLibError( f"Unexpected token {self.cur_token_} " f"in AxisValue", self.cur_token_location_, ) self.expect_symbol_("}") if not names: raise FeatureLibError('Expected "Axis Name"', self.cur_token_location_) if not locations: raise FeatureLibError('Expected "Axis location"', self.cur_token_location_) if len(locations) > 1: for location in locations: if len(location.values) > 1: raise FeatureLibError( "Only one value is allowed in a " "Format 4 Axis Value Record, but " f"{len(location.values)} were found.", self.cur_token_location_, ) format4_tags = [] for location in locations: tag = location.tag if tag in format4_tags: raise FeatureLibError( f"Axis tag {tag} already " "defined.", self.cur_token_location_ ) format4_tags.append(tag) return self.ast.STATAxisValueStatement( names, locations, flags, self.cur_token_location_ ) def parse_STAT_location(self): values = [] tag = self.expect_tag_() if len(tag.strip()) != 4: raise FeatureLibError( f"Axis tag {self.cur_token_} must be 4 " "characters", self.cur_token_location_, ) while self.next_token_ != ";": if self.next_token_type_ is Lexer.FLOAT: value = self.expect_float_() values.append(value) elif self.next_token_type_ is Lexer.NUMBER: value = self.expect_number_() values.append(value) else: raise FeatureLibError( f'Unexpected value "{self.next_token_}". 
' "Expected integer or float.", self.next_token_location_, ) if len(values) == 3: nominal, min_val, max_val = values if nominal < min_val or nominal > max_val: raise FeatureLibError( f"Default value {nominal} is outside " f"of specified range " f"{min_val}-{max_val}.", self.next_token_location_, ) return self.ast.AxisValueLocationStatement(tag, values) def parse_table_STAT_(self, table): statements = table.statements design_axes = [] while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.cur_token_type_ is Lexer.NAME: if self.is_cur_keyword_("ElidedFallbackName"): names = self.parse_STAT_ElidedFallbackName() statements.append(self.ast.ElidedFallbackName(names)) elif self.is_cur_keyword_("ElidedFallbackNameID"): value = self.expect_number_() statements.append(self.ast.ElidedFallbackNameID(value)) self.expect_symbol_(";") elif self.is_cur_keyword_("DesignAxis"): designAxis = self.parse_STAT_design_axis() design_axes.append(designAxis.tag) statements.append(designAxis) self.expect_symbol_(";") elif self.is_cur_keyword_("AxisValue"): axisValueRecord = self.parse_STAT_axis_value_() for location in axisValueRecord.locations: if location.tag not in design_axes: # Tag must be defined in a DesignAxis before it # can be referenced raise FeatureLibError( "DesignAxis not defined for " f"{location.tag}.", self.cur_token_location_, ) statements.append(axisValueRecord) self.expect_symbol_(";") else: raise FeatureLibError( f"Unexpected token {self.cur_token_}", self.cur_token_location_ ) elif self.cur_token_ == ";": continue def parse_base_tag_list_(self): # Parses BASE table entries. (See `section 9.a <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.a>`_) assert self.cur_token_ in ( "HorizAxis.BaseTagList", "VertAxis.BaseTagList", ), self.cur_token_ bases = [] while self.next_token_ != ";": bases.append(self.expect_script_tag_()) self.expect_symbol_(";") return bases def parse_base_script_list_(self, count): assert self.cur_token_ in ( "HorizAxis.BaseScriptList", "VertAxis.BaseScriptList", ), self.cur_token_ scripts = [(self.parse_base_script_record_(count))] while self.next_token_ == ",": self.expect_symbol_(",") scripts.append(self.parse_base_script_record_(count)) self.expect_symbol_(";") return scripts def parse_base_script_record_(self, count): script_tag = self.expect_script_tag_() base_tag = self.expect_script_tag_() coords = [self.expect_number_() for i in range(count)] return script_tag, base_tag, coords def parse_device_(self): result = None self.expect_symbol_("<") self.expect_keyword_("device") if self.next_token_ == "NULL": self.expect_keyword_("NULL") else: result = [(self.expect_number_(), self.expect_number_())] while self.next_token_ == ",": self.expect_symbol_(",") result.append((self.expect_number_(), self.expect_number_())) result = tuple(result) # make it hashable self.expect_symbol_(">") return result def is_next_value_(self): return ( self.next_token_type_ is Lexer.NUMBER or self.next_token_ == "<" or self.next_token_ == "(" ) def parse_valuerecord_(self, vertical): if ( self.next_token_type_ is Lexer.SYMBOL and self.next_token_ == "(" ) or self.next_token_type_ is Lexer.NUMBER: number, location = ( self.expect_number_(variable=True), self.cur_token_location_, ) if vertical: val = self.ast.ValueRecord( yAdvance=number, vertical=vertical, location=location ) else: val = 
self.ast.ValueRecord( xAdvance=number, vertical=vertical, location=location ) return val self.expect_symbol_("<") location = self.cur_token_location_ if self.next_token_type_ is Lexer.NAME: name = self.expect_name_() if name == "NULL": self.expect_symbol_(">") return self.ast.ValueRecord() vrd = self.valuerecords_.resolve(name) if vrd is None: raise FeatureLibError( 'Unknown valueRecordDef "%s"' % name, self.cur_token_location_ ) value = vrd.value xPlacement, yPlacement = (value.xPlacement, value.yPlacement) xAdvance, yAdvance = (value.xAdvance, value.yAdvance) else: xPlacement, yPlacement, xAdvance, yAdvance = ( self.expect_number_(variable=True), self.expect_number_(variable=True), self.expect_number_(variable=True), self.expect_number_(variable=True), ) if self.next_token_ == "<": xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = ( self.parse_device_(), self.parse_device_(), self.parse_device_(), self.parse_device_(), ) allDeltas = sorted( [ delta for size, delta in (xPlaDevice if xPlaDevice else ()) + (yPlaDevice if yPlaDevice else ()) + (xAdvDevice if xAdvDevice else ()) + (yAdvDevice if yAdvDevice else ()) ] ) if allDeltas[0] < -128 or allDeltas[-1] > 127: raise FeatureLibError( "Device value out of valid range (-128..127)", self.cur_token_location_, ) else: xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (None, None, None, None) self.expect_symbol_(">") return self.ast.ValueRecord( xPlacement, yPlacement, xAdvance, yAdvance, xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice, vertical=vertical, location=location, ) def parse_valuerecord_definition_(self, vertical): # Parses a named value record definition. (See section `2.e.v <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.v>`_) assert self.is_cur_keyword_("valueRecordDef") location = self.cur_token_location_ value = self.parse_valuerecord_(vertical) name = self.expect_name_() self.expect_symbol_(";") vrd = self.ast.ValueRecordDefinition(name, value, location=location) self.valuerecords_.define(name, vrd) return vrd def parse_languagesystem_(self): assert self.cur_token_ == "languagesystem" location = self.cur_token_location_ script = self.expect_script_tag_() language = self.expect_language_tag_() self.expect_symbol_(";") return self.ast.LanguageSystemStatement(script, language, location=location) def parse_feature_block_(self, variation=False): if variation: assert self.cur_token_ == "variation" else: assert self.cur_token_ == "feature" location = self.cur_token_location_ tag = self.expect_tag_() vertical = tag in {"vkrn", "vpal", "vhal", "valt"} stylisticset = None cv_feature = None size_feature = False if tag in self.SS_FEATURE_TAGS: stylisticset = tag elif tag in self.CV_FEATURE_TAGS: cv_feature = tag elif tag == "size": size_feature = True if variation: conditionset = self.expect_name_() use_extension = False if self.next_token_ == "useExtension": self.expect_keyword_("useExtension") use_extension = True if variation: block = self.ast.VariationBlock( tag, conditionset, use_extension=use_extension, location=location ) else: block = self.ast.FeatureBlock( tag, use_extension=use_extension, location=location ) self.parse_block_(block, vertical, stylisticset, size_feature, cv_feature) return block def parse_feature_reference_(self): assert self.cur_token_ == "feature", self.cur_token_ location = self.cur_token_location_ featureName = self.expect_tag_() self.expect_symbol_(";") return self.ast.FeatureReferenceStatement(featureName, location=location) def parse_featureNames_(self, tag): """Parses a 
``featureNames`` statement found in stylistic set features. See section `8.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.c>`_. """ assert self.cur_token_ == "featureNames", self.cur_token_ block = self.ast.NestedBlock( tag, self.cur_token_, location=self.cur_token_location_ ) self.expect_symbol_("{") for symtab in self.symbol_tables_: symtab.enter_scope() while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: block.statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.is_cur_keyword_("name"): location = self.cur_token_location_ platformID, platEncID, langID, string = self.parse_name_() block.statements.append( self.ast.FeatureNameStatement( tag, platformID, platEncID, langID, string, location=location ) ) elif self.cur_token_ == ";": continue else: raise FeatureLibError('Expected "name"', self.cur_token_location_) self.expect_symbol_("}") for symtab in self.symbol_tables_: symtab.exit_scope() self.expect_symbol_(";") return block def parse_cvParameters_(self, tag): # Parses a ``cvParameters`` block found in Character Variant features. # See section `8.d <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.d>`_. assert self.cur_token_ == "cvParameters", self.cur_token_ block = self.ast.NestedBlock( tag, self.cur_token_, location=self.cur_token_location_ ) self.expect_symbol_("{") for symtab in self.symbol_tables_: symtab.enter_scope() statements = block.statements while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.is_cur_keyword_( { "FeatUILabelNameID", "FeatUITooltipTextNameID", "SampleTextNameID", "ParamUILabelNameID", } ): statements.append(self.parse_cvNameIDs_(tag, self.cur_token_)) elif self.is_cur_keyword_("Character"): statements.append(self.parse_cvCharacter_(tag)) elif self.cur_token_ == ";": continue else: raise FeatureLibError( "Expected statement: got {} {}".format( self.cur_token_type_, self.cur_token_ ), self.cur_token_location_, ) self.expect_symbol_("}") for symtab in self.symbol_tables_: symtab.exit_scope() self.expect_symbol_(";") return block def parse_cvNameIDs_(self, tag, block_name): assert self.cur_token_ == block_name, self.cur_token_ block = self.ast.NestedBlock(tag, block_name, location=self.cur_token_location_) self.expect_symbol_("{") for symtab in self.symbol_tables_: symtab.enter_scope() while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: block.statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.is_cur_keyword_("name"): location = self.cur_token_location_ platformID, platEncID, langID, string = self.parse_name_() block.statements.append( self.ast.CVParametersNameStatement( tag, platformID, platEncID, langID, string, block_name, location=location, ) ) elif self.cur_token_ == ";": continue else: raise FeatureLibError('Expected "name"', self.cur_token_location_) self.expect_symbol_("}") for symtab in self.symbol_tables_: symtab.exit_scope() self.expect_symbol_(";") return block def parse_cvCharacter_(self, tag): assert self.cur_token_ == "Character", self.cur_token_ location, character = self.cur_token_location_, self.expect_any_number_() self.expect_symbol_(";") if not (0xFFFFFF 
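# featureNames (stylistic sets) and cvParameters (character variants) are the
# nested metadata blocks handled by parse_featureNames_ / parse_cvParameters_
# above. Minimal sketch, hypothetical names:
from io import StringIO
from fontTools.feaLib.parser import Parser

fea = """
feature ss01 {
    featureNames { name "Fancy alternates"; };
    sub a by a.alt1;
} ss01;
feature cv01 {
    cvParameters {
        FeatUILabelNameID { name "Single-storey a"; };
        Character 0x61;
    };
    sub a by a.alt1;
} cv01;
"""
print(Parser(StringIO(fea)).parse().asFea())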
>= character >= 0): raise FeatureLibError( "Character value must be between " "{:#x} and {:#x}".format(0, 0xFFFFFF), location, ) return self.ast.CharacterStatement(character, tag, location=location) def parse_FontRevision_(self): # Parses a ``FontRevision`` statement found in the head table. See # `section 9.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.c>`_. assert self.cur_token_ == "FontRevision", self.cur_token_ location, version = self.cur_token_location_, self.expect_float_() self.expect_symbol_(";") if version <= 0: raise FeatureLibError("Font revision numbers must be positive", location) return self.ast.FontRevisionStatement(version, location=location) def parse_conditionset_(self): name = self.expect_name_() conditions = {} self.expect_symbol_("{") while self.next_token_ != "}": self.advance_lexer_() if self.cur_token_type_ is not Lexer.NAME: raise FeatureLibError("Expected an axis name", self.cur_token_location_) axis = self.cur_token_ if axis in conditions: raise FeatureLibError( f"Repeated condition for axis {axis}", self.cur_token_location_ ) if self.next_token_type_ is Lexer.FLOAT: min_value = self.expect_float_() elif self.next_token_type_ is Lexer.NUMBER: min_value = self.expect_number_(variable=False) if self.next_token_type_ is Lexer.FLOAT: max_value = self.expect_float_() elif self.next_token_type_ is Lexer.NUMBER: max_value = self.expect_number_(variable=False) self.expect_symbol_(";") conditions[axis] = (min_value, max_value) self.expect_symbol_("}") finalname = self.expect_name_() if finalname != name: raise FeatureLibError('Expected "%s"' % name, self.cur_token_location_) return self.ast.ConditionsetStatement(name, conditions) def parse_block_( self, block, vertical, stylisticset=None, size_feature=False, cv_feature=None ): self.expect_symbol_("{") for symtab in self.symbol_tables_: symtab.enter_scope() statements = block.statements while self.next_token_ != "}" or self.cur_comments_: self.advance_lexer_(comments=True) if self.cur_token_type_ is Lexer.COMMENT: statements.append( self.ast.Comment(self.cur_token_, location=self.cur_token_location_) ) elif self.cur_token_type_ is Lexer.GLYPHCLASS: statements.append(self.parse_glyphclass_definition_()) elif self.is_cur_keyword_("anchorDef"): statements.append(self.parse_anchordef_()) elif self.is_cur_keyword_({"enum", "enumerate"}): statements.append(self.parse_enumerate_(vertical=vertical)) elif self.is_cur_keyword_("feature"): statements.append(self.parse_feature_reference_()) elif self.is_cur_keyword_("ignore"): statements.append(self.parse_ignore_()) elif self.is_cur_keyword_("language"): statements.append(self.parse_language_()) elif self.is_cur_keyword_("lookup"): statements.append(self.parse_lookup_(vertical)) elif self.is_cur_keyword_("lookupflag"): statements.append(self.parse_lookupflag_()) elif self.is_cur_keyword_("markClass"): statements.append(self.parse_markClass_()) elif self.is_cur_keyword_({"pos", "position"}): statements.append( self.parse_position_(enumerated=False, vertical=vertical) ) elif self.is_cur_keyword_("script"): statements.append(self.parse_script_()) elif self.is_cur_keyword_({"sub", "substitute", "rsub", "reversesub"}): statements.append(self.parse_substitute_()) elif self.is_cur_keyword_("subtable"): statements.append(self.parse_subtable_()) elif self.is_cur_keyword_("valueRecordDef"): statements.append(self.parse_valuerecord_definition_(vertical)) elif stylisticset and self.is_cur_keyword_("featureNames"): 
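# parse_conditionset_ (above) reads "<axis> <min> <max>;" records and, like
# feature and table blocks, requires the block name to be repeated after the
# closing brace. conditionsets pair with `variation` blocks (parsed by
# parse_feature_block_(variation=True)); a minimal variable-font sketch:
from io import StringIO
from fontTools.feaLib.parser import Parser

fea = """
conditionset heavy {
    wght 700 900;
} heavy;
variation rvrn heavy {
    sub dollar by dollar.alt;
} rvrn;
"""
print(Parser(StringIO(fea)).parse().asFea())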
statements.append(self.parse_featureNames_(stylisticset)) elif cv_feature and self.is_cur_keyword_("cvParameters"): statements.append(self.parse_cvParameters_(cv_feature)) elif size_feature and self.is_cur_keyword_("parameters"): statements.append(self.parse_size_parameters_()) elif size_feature and self.is_cur_keyword_("sizemenuname"): statements.append(self.parse_size_menuname_()) elif ( self.cur_token_type_ is Lexer.NAME and self.cur_token_ in self.extensions ): statements.append(self.extensions[self.cur_token_](self)) elif self.cur_token_ == ";": continue else: raise FeatureLibError( "Expected glyph class definition or statement: got {} {}".format( self.cur_token_type_, self.cur_token_ ), self.cur_token_location_, ) self.expect_symbol_("}") for symtab in self.symbol_tables_: symtab.exit_scope() name = self.expect_name_() if name != block.name.strip(): raise FeatureLibError( 'Expected "%s"' % block.name.strip(), self.cur_token_location_ ) self.expect_symbol_(";") # A multiple substitution may have a single destination, in which case # it will look just like a single substitution. So if there are both # multiple and single substitutions, upgrade all the single ones to # multiple substitutions. # Check if we have a mix of non-contextual singles and multiples. has_single = False has_multiple = False for s in statements: if isinstance(s, self.ast.SingleSubstStatement): has_single = not any([s.prefix, s.suffix, s.forceChain]) elif isinstance(s, self.ast.MultipleSubstStatement): has_multiple = not any([s.prefix, s.suffix, s.forceChain]) # Upgrade all single substitutions to multiple substitutions. if has_single and has_multiple: statements = [] for s in block.statements: if isinstance(s, self.ast.SingleSubstStatement): glyphs = s.glyphs[0].glyphSet() replacements = s.replacements[0].glyphSet() if len(replacements) == 1: replacements *= len(glyphs) for i, glyph in enumerate(glyphs): statements.append( self.ast.MultipleSubstStatement( s.prefix, glyph, s.suffix, [replacements[i]], s.forceChain, location=s.location, ) ) else: statements.append(s) block.statements = statements def is_cur_keyword_(self, k): if self.cur_token_type_ is Lexer.NAME: if isinstance(k, type("")): # basestring is gone in Python3 return self.cur_token_ == k else: return self.cur_token_ in k return False def expect_class_name_(self): self.advance_lexer_() if self.cur_token_type_ is not Lexer.GLYPHCLASS: raise FeatureLibError("Expected @NAME", self.cur_token_location_) return self.cur_token_ def expect_cid_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.CID: return self.cur_token_ raise FeatureLibError("Expected a CID", self.cur_token_location_) def expect_filename_(self): self.advance_lexer_() if self.cur_token_type_ is not Lexer.FILENAME: raise FeatureLibError("Expected file name", self.cur_token_location_) return self.cur_token_ def expect_glyph_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME: return self.cur_token_.lstrip("\\") elif self.cur_token_type_ is Lexer.CID: return "cid%05d" % self.cur_token_ raise FeatureLibError("Expected a glyph name or CID", self.cur_token_location_) def check_glyph_name_in_glyph_set(self, *names): """Adds a glyph name (just `start`) or glyph names of a range (`start` and `end`) which are not in the glyph set to the "missing list" for future error reporting. If no glyph set is present, does nothing. 
""" if self.glyphNames_: for name in names: if name in self.glyphNames_: continue if name not in self.missing: self.missing[name] = self.cur_token_location_ def expect_markClass_reference_(self): name = self.expect_class_name_() mc = self.glyphclasses_.resolve(name) if mc is None: raise FeatureLibError( "Unknown markClass @%s" % name, self.cur_token_location_ ) if not isinstance(mc, self.ast.MarkClass): raise FeatureLibError( "@%s is not a markClass" % name, self.cur_token_location_ ) return mc def expect_tag_(self): self.advance_lexer_() if self.cur_token_type_ is not Lexer.NAME: raise FeatureLibError("Expected a tag", self.cur_token_location_) if len(self.cur_token_) > 4: raise FeatureLibError( "Tags cannot be longer than 4 characters", self.cur_token_location_ ) return (self.cur_token_ + " ")[:4] def expect_script_tag_(self): tag = self.expect_tag_() if tag == "dflt": raise FeatureLibError( '"dflt" is not a valid script tag; use "DFLT" instead', self.cur_token_location_, ) return tag def expect_language_tag_(self): tag = self.expect_tag_() if tag == "DFLT": raise FeatureLibError( '"DFLT" is not a valid language tag; use "dflt" instead', self.cur_token_location_, ) return tag def expect_symbol_(self, symbol): self.advance_lexer_() if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: return symbol raise FeatureLibError("Expected '%s'" % symbol, self.cur_token_location_) def expect_keyword_(self, keyword): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: return self.cur_token_ raise FeatureLibError('Expected "%s"' % keyword, self.cur_token_location_) def expect_name_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME: return self.cur_token_ raise FeatureLibError("Expected a name", self.cur_token_location_) def expect_number_(self, variable=False): self.advance_lexer_() if self.cur_token_type_ is Lexer.NUMBER: return self.cur_token_ if variable and self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "(": return self.expect_variable_scalar_() raise FeatureLibError("Expected a number", self.cur_token_location_) def expect_variable_scalar_(self): self.advance_lexer_() # "(" scalar = VariableScalar() while True: if self.cur_token_type_ == Lexer.SYMBOL and self.cur_token_ == ")": break location, value = self.expect_master_() scalar.add_value(location, value) return scalar def expect_master_(self): location = {} while True: if self.cur_token_type_ is not Lexer.NAME: raise FeatureLibError("Expected an axis name", self.cur_token_location_) axis = self.cur_token_ self.advance_lexer_() if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "="): raise FeatureLibError( "Expected an equals sign", self.cur_token_location_ ) value = self.expect_number_() location[axis] = value if self.next_token_type_ is Lexer.NAME and self.next_token_[0] == ":": # Lexer has just read the value as a glyph name. 
We'll correct it later break self.advance_lexer_() if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","): raise FeatureLibError( "Expected an comma or an equals sign", self.cur_token_location_ ) self.advance_lexer_() self.advance_lexer_() value = int(self.cur_token_[1:]) self.advance_lexer_() return location, value def expect_any_number_(self): self.advance_lexer_() if self.cur_token_type_ in Lexer.NUMBERS: return self.cur_token_ raise FeatureLibError( "Expected a decimal, hexadecimal or octal number", self.cur_token_location_ ) def expect_float_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.FLOAT: return self.cur_token_ raise FeatureLibError( "Expected a floating-point number", self.cur_token_location_ ) def expect_decipoint_(self): if self.next_token_type_ == Lexer.FLOAT: return self.expect_float_() elif self.next_token_type_ is Lexer.NUMBER: return self.expect_number_() / 10 else: raise FeatureLibError( "Expected an integer or floating-point number", self.cur_token_location_ ) def expect_stat_flags(self): value = 0 flags = { "OlderSiblingFontAttribute": 1, "ElidableAxisValueName": 2, } while self.next_token_ != ";": if self.next_token_ in flags: name = self.expect_name_() value = value | flags[name] else: raise FeatureLibError( f"Unexpected STAT flag {self.cur_token_}", self.cur_token_location_ ) return value def expect_stat_values_(self): if self.next_token_type_ == Lexer.FLOAT: return self.expect_float_() elif self.next_token_type_ is Lexer.NUMBER: return self.expect_number_() else: raise FeatureLibError( "Expected an integer or floating-point number", self.cur_token_location_ ) def expect_string_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.STRING: return self.cur_token_ raise FeatureLibError("Expected a string", self.cur_token_location_) def advance_lexer_(self, comments=False): if comments and self.cur_comments_: self.cur_token_type_ = Lexer.COMMENT self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0) return else: self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( self.next_token_type_, self.next_token_, self.next_token_location_, ) while True: try: ( self.next_token_type_, self.next_token_, self.next_token_location_, ) = next(self.lexer_) except StopIteration: self.next_token_type_, self.next_token_ = (None, None) if self.next_token_type_ != Lexer.COMMENT: break self.cur_comments_.append((self.next_token_, self.next_token_location_)) @staticmethod def reverse_string_(s): """'abc' --> 'cba'""" return "".join(reversed(list(s))) def make_cid_range_(self, location, start, limit): """(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]""" result = list() if start > limit: raise FeatureLibError( "Bad range: start should be less than limit", location ) for cid in range(start, limit + 1): result.append("cid%05d" % cid) return result def make_glyph_range_(self, location, start, limit): """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]""" result = list() if len(start) != len(limit): raise FeatureLibError( 'Bad range: "%s" and "%s" should have the same length' % (start, limit), location, ) rev = self.reverse_string_ prefix = os.path.commonprefix([start, limit]) suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) if len(suffix) > 0: start_range = start[len(prefix) : -len(suffix)] limit_range = limit[len(prefix) : -len(suffix)] else: start_range = start[len(prefix) :] limit_range = limit[len(prefix) :] if start_range >= limit_range: raise FeatureLibError( "Start of range 
must be smaller than its end", location
            )
        uppercase = re.compile(r"^[A-Z]$")
        if uppercase.match(start_range) and uppercase.match(limit_range):
            for c in range(ord(start_range), ord(limit_range) + 1):
                result.append("%s%c%s" % (prefix, c, suffix))
            return result
        lowercase = re.compile(r"^[a-z]$")
        if lowercase.match(start_range) and lowercase.match(limit_range):
            for c in range(ord(start_range), ord(limit_range) + 1):
                result.append("%s%c%s" % (prefix, c, suffix))
            return result
        digits = re.compile(r"^[0-9]{1,3}$")
        if digits.match(start_range) and digits.match(limit_range):
            for i in range(int(start_range, 10), int(limit_range, 10) + 1):
                number = ("000" + str(i))[-len(start_range):]
                result.append("%s%s%s" % (prefix, number, suffix))
            return result
        raise FeatureLibError('Bad range: "%s-%s"' % (start, limit), location)


class SymbolTable(object):
    def __init__(self):
        self.scopes_ = [{}]

    def enter_scope(self):
        self.scopes_.append({})

    def exit_scope(self):
        self.scopes_.pop()

    def define(self, name, item):
        self.scopes_[-1][name] = item

    def resolve(self, name):
        for scope in reversed(self.scopes_):
            item = scope.get(name)
            if item:
                return item
        return None


# ---- fontTools/feaLib/variableScalar.py ----

from fontTools.varLib.models import VariationModel, normalizeValue, piecewiseLinearMap


def Location(loc):
    return tuple(sorted(loc.items()))


class VariableScalar:
    """A scalar with different values at different points in the designspace."""

    def __init__(self, location_value={}):
        self.values = {}
        self.axes = {}
        for location, value in location_value.items():
            self.add_value(location, value)

    def __repr__(self):
        items = []
        for location, value in self.values.items():
            loc = ",".join(["%s=%i" % (ax, loc) for ax, loc in location])
            items.append("%s:%i" % (loc, value))
        return "(" + (" ".join(items)) + ")"

    @property
    def does_vary(self):
        values = list(self.values.values())
        return any(v != values[0] for v in values[1:])

    @property
    def axes_dict(self):
        if not self.axes:
            raise ValueError(
                ".axes must be defined on variable scalar before interpolating"
            )
        return {ax.axisTag: ax for ax in self.axes}

    def _normalized_location(self, location):
        location = self.fix_location(location)
        normalized_location = {}
        for axtag in location.keys():
            if axtag not in self.axes_dict:
                raise ValueError("Unknown axis %s in %s" % (axtag, location))
            axis = self.axes_dict[axtag]
            normalized_location[axtag] = normalizeValue(
                location[axtag], (axis.minValue, axis.defaultValue, axis.maxValue)
            )
        return Location(normalized_location)

    def fix_location(self, location):
        location = dict(location)
        for tag, axis in self.axes_dict.items():
            if tag not in location:
                location[tag] = axis.defaultValue
        return location

    def add_value(self, location, value):
        if self.axes:
            location = self.fix_location(location)
        self.values[Location(location)] = value

    def fix_all_locations(self):
        self.values = {
            Location(self.fix_location(l)): v for l, v in self.values.items()
        }

    @property
    def default(self):
        self.fix_all_locations()
        key = Location({ax.axisTag: ax.defaultValue for ax in self.axes})
        if key not in self.values:
            raise ValueError("Default value could not be found")
        # I *guess* we could interpolate one, but I don't know how.
return self.values[key] def value_at_location(self, location, model_cache=None, avar=None): loc = location if loc in self.values.keys(): return self.values[loc] values = list(self.values.values()) return self.model(model_cache, avar).interpolateFromMasters(loc, values) def model(self, model_cache=None, avar=None): if model_cache is not None: key = tuple(self.values.keys()) if key in model_cache: return model_cache[key] locations = [dict(self._normalized_location(k)) for k in self.values.keys()] if avar is not None: mapping = avar.segments locations = [ { k: piecewiseLinearMap(v, mapping[k]) if k in mapping else v for k, v in location.items() } for location in locations ] m = VariationModel(locations) if model_cache is not None: model_cache[key] = m return m def get_deltas_and_supports(self, model_cache=None, avar=None): values = list(self.values.values()) return self.model(model_cache, avar).getDeltasAndSupports(values) def add_to_variation_store(self, store_builder, model_cache=None, avar=None): deltas, supports = self.get_deltas_and_supports(model_cache, avar) store_builder.setSupports(supports) index = store_builder.storeDeltas(deltas) return int(self.default), index PKaZZZx�.[9 9 fontTools/merge/__init__.py# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod, Roozbeh Pournader from fontTools import ttLib import fontTools.merge.base from fontTools.merge.cmap import ( computeMegaGlyphOrder, computeMegaCmap, renameCFFCharStrings, ) from fontTools.merge.layout import layoutPreMerge, layoutPostMerge from fontTools.merge.options import Options import fontTools.merge.tables from fontTools.misc.loggingTools import Timer from functools import reduce import sys import logging log = logging.getLogger("fontTools.merge") timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO) class Merger(object): """Font merger. This class merges multiple files into a single OpenType font, taking into account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across all the fonts). If multiple glyphs map to the same Unicode value, and the glyphs are considered sufficiently different (that is, they differ in any of paths, widths, or height), then subsequent glyphs are renamed and a lookup in the ``locl`` feature will be created to disambiguate them. For example, if the arguments are an Arabic font and a Latin font and both contain a set of parentheses, the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``, and a lookup will be inserted into the to ``locl`` feature (creating it if necessary) under the ``latn`` script to substitute ``parenleft`` with ``parenleft#1`` etc. Restrictions: - All fonts must have the same units per em. - If duplicate glyph disambiguation takes place as described above then the fonts must have a ``GSUB`` table. Attributes: options: Currently unused. """ def __init__(self, options=None): if not options: options = Options() self.options = options def _openFonts(self, fontfiles): fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] for font, fontfile in zip(fonts, fontfiles): font._merger__fontfile = fontfile font._merger__name = font["name"].getDebugName(4) return fonts def merge(self, fontfiles): """Merges fonts together. Args: fontfiles: A list of file names to be merged Returns: A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on this to write it out to an OTF file. """ # # Settle on a mega glyph order. 
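# Minimal usage sketch for VariableScalar (defined above). The .axes list
# normally holds designspace axis objects; a SimpleNamespace with the
# attributes the class actually reads (axisTag, minValue, defaultValue,
# maxValue) stands in here. All numbers are hypothetical.
from types import SimpleNamespace
from fontTools.feaLib.variableScalar import VariableScalar

vs = VariableScalar()
vs.axes = [SimpleNamespace(axisTag="wght", minValue=100, defaultValue=400, maxValue=900)]
vs.add_value({"wght": 400}, -100)
vs.add_value({"wght": 900}, -20)
print(vs.does_vary)  # True
print(vs.default)    # -100, the value at the default location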
# fonts = self._openFonts(fontfiles) glyphOrders = [list(font.getGlyphOrder()) for font in fonts] computeMegaGlyphOrder(self, glyphOrders) # Take first input file sfntVersion sfntVersion = fonts[0].sfntVersion # Reload fonts and set new glyph names on them. fonts = self._openFonts(fontfiles) for font, glyphOrder in zip(fonts, glyphOrders): font.setGlyphOrder(glyphOrder) if "CFF " in font: renameCFFCharStrings(self, glyphOrder, font["CFF "]) cmaps = [font["cmap"] for font in fonts] self.duplicateGlyphsPerFont = [{} for _ in fonts] computeMegaCmap(self, cmaps) mega = ttLib.TTFont(sfntVersion=sfntVersion) mega.setGlyphOrder(self.glyphOrder) for font in fonts: self._preMerge(font) self.fonts = fonts allTags = reduce(set.union, (list(font.keys()) for font in fonts), set()) allTags.remove("GlyphOrder") for tag in sorted(allTags): if tag in self.options.drop_tables: continue with timer("merge '%s'" % tag): tables = [font.get(tag, NotImplemented) for font in fonts] log.info("Merging '%s'.", tag) clazz = ttLib.getTableClass(tag) table = clazz(tag).merge(self, tables) # XXX Clean this up and use: table = mergeObjects(tables) if table is not NotImplemented and table is not False: mega[tag] = table log.info("Merged '%s'.", tag) else: log.info("Dropped '%s'.", tag) del self.duplicateGlyphsPerFont del self.fonts self._postMerge(mega) return mega def mergeObjects(self, returnTable, logic, tables): # Right now we don't use self at all. Will use in the future # for options and logging. allKeys = set.union( set(), *(vars(table).keys() for table in tables if table is not NotImplemented), ) for key in allKeys: log.info(" %s", key) try: mergeLogic = logic[key] except KeyError: try: mergeLogic = logic["*"] except KeyError: raise Exception( "Don't know how to merge key %s of class %s" % (key, returnTable.__class__.__name__) ) if mergeLogic is NotImplemented: continue value = mergeLogic(getattr(table, key, NotImplemented) for table in tables) if value is not NotImplemented: setattr(returnTable, key, value) return returnTable def _preMerge(self, font): layoutPreMerge(font) def _postMerge(self, font): layoutPostMerge(font) if "OS/2" in font: # https://github.com/fonttools/fonttools/issues/2538 # TODO: Add an option to disable this? font["OS/2"].recalcAvgCharWidth(font) __all__ = ["Options", "Merger", "main"] @timer("make one with everything (TOTAL TIME)") def main(args=None): """Merge multiple fonts into one""" from fontTools import configLogger if args is None: args = sys.argv[1:] options = Options() args = options.parse_opts(args) fontfiles = [] if options.input_file: with open(options.input_file) as inputfile: fontfiles = [ line.strip() for line in inputfile.readlines() if not line.lstrip().startswith("#") ] for g in args: fontfiles.append(g) if len(fontfiles) < 1: print( "usage: pyftmerge [font1 ... fontN] [--input-file=filelist.txt] [--output-file=merged.ttf] [--import-file=tables.ttx]", file=sys.stderr, ) print( " [--drop-tables=tags] [--verbose] [--timing]", file=sys.stderr, ) print("", file=sys.stderr) print(" font1 ... fontN Files to merge.", file=sys.stderr) print( " --input-file=<filename> Read files to merge from a text file, each path new line. # Comment lines allowed.", file=sys.stderr, ) print( " --output-file=<filename> Specify output file name (default: merged.ttf).", file=sys.stderr, ) print( " --import-file=<filename> TTX file to import after merging. 
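# Programmatic use of the Merger defined above; the pyftmerge command-line
# tool (main(), below) wraps exactly this. File names here are hypothetical,
# and all inputs must share the same units per em. This assumes Options
# exposes a drop_tables list, as consulted by merge() above.
from fontTools.merge import Merger, Options

options = Options()
options.drop_tables += ["DSIG"]  # tables to skip while merging
merger = Merger(options=options)
font = merger.merge(["NotoSansArabic.ttf", "NotoSansLatin.ttf"])
font.save("merged.ttf")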
This can be used to set metadata.", file=sys.stderr, ) print( " --drop-tables=<table tags> Comma separated list of table tags to skip, case sensitive.", file=sys.stderr, ) print( " --verbose Output progress information.", file=sys.stderr, ) print(" --timing Output progress timing.", file=sys.stderr) return 1 configLogger(level=logging.INFO if options.verbose else logging.WARNING) if options.timing: timer.logger.setLevel(logging.DEBUG) else: timer.logger.disabled = True merger = Merger(options=options) font = merger.merge(fontfiles) if options.import_file: font.importXML(options.import_file) with timer("compile and save font"): font.save(options.output_file) if __name__ == "__main__": sys.exit(main()) PKaZZZJ_<�^^fontTools/merge/__main__.pyimport sys from fontTools.merge import main if __name__ == "__main__": sys.exit(main()) PKaZZZ��OU U fontTools/merge/base.py# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod, Roozbeh Pournader from fontTools.ttLib.tables.DefaultTable import DefaultTable import logging log = logging.getLogger("fontTools.merge") def add_method(*clazzes, **kwargs): """Returns a decorator function that adds a new method to one or more classes.""" allowDefault = kwargs.get("allowDefaultTable", False) def wrapper(method): done = [] for clazz in clazzes: if clazz in done: continue # Support multiple names of a clazz done.append(clazz) assert allowDefault or clazz != DefaultTable, "Oops, table class not found." assert ( method.__name__ not in clazz.__dict__ ), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__) setattr(clazz, method.__name__, method) return None return wrapper def mergeObjects(lst): lst = [item for item in lst if item is not NotImplemented] if not lst: return NotImplemented lst = [item for item in lst if item is not None] if not lst: return None clazz = lst[0].__class__ assert all(type(item) == clazz for item in lst), lst logic = clazz.mergeMap returnTable = clazz() returnDict = {} allKeys = set.union(set(), *(vars(table).keys() for table in lst)) for key in allKeys: try: mergeLogic = logic[key] except KeyError: try: mergeLogic = logic["*"] except KeyError: raise Exception( "Don't know how to merge key %s of class %s" % (key, clazz.__name__) ) if mergeLogic is NotImplemented: continue value = mergeLogic(getattr(table, key, NotImplemented) for table in lst) if value is not NotImplemented: returnDict[key] = value returnTable.__dict__ = returnDict return returnTable @add_method(DefaultTable, allowDefaultTable=True) def merge(self, m, tables): if not hasattr(self, "mergeMap"): log.info("Don't know how to merge '%s'.", self.tableTag) return NotImplemented logic = self.mergeMap if isinstance(logic, dict): return m.mergeObjects(self, self.mergeMap, tables) else: return logic(tables) PKaZZZ=z� ��fontTools/merge/cmap.py# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod, Roozbeh Pournader from fontTools.merge.unicode import is_Default_Ignorable from fontTools.pens.recordingPen import DecomposingRecordingPen import logging log = logging.getLogger("fontTools.merge") def computeMegaGlyphOrder(merger, glyphOrders): """Modifies passed-in glyphOrders to reflect new glyph names. Stores merger.glyphOrder.""" megaOrder = {} for glyphOrder in glyphOrders: for i, glyphName in enumerate(glyphOrder): if glyphName in megaOrder: n = megaOrder[glyphName] while (glyphName + "." + repr(n)) in megaOrder: n += 1 megaOrder[glyphName] = n glyphName += "." 
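# add_method (defined above) monkey-patches merge methods onto existing table
# classes, asserting against accidental double registration. A toy sketch of
# the same pattern on a local class, not on real fontTools tables:
def add_method_demo(*clazzes):
    def wrapper(method):
        for clazz in clazzes:
            assert method.__name__ not in clazz.__dict__
            setattr(clazz, method.__name__, method)
        return None  # the decorated name is deliberately left unbound
    return wrapper

class Demo:
    pass

@add_method_demo(Demo)
def merged_name(self):
    return "patched in"

print(Demo().merged_name())  # patched in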
+ repr(n) glyphOrder[i] = glyphName megaOrder[glyphName] = 1 merger.glyphOrder = megaOrder = list(megaOrder.keys()) def _glyphsAreSame( glyphSet1, glyphSet2, glyph1, glyph2, advanceTolerance=0.05, advanceToleranceEmpty=0.20, ): pen1 = DecomposingRecordingPen(glyphSet1) pen2 = DecomposingRecordingPen(glyphSet2) g1 = glyphSet1[glyph1] g2 = glyphSet2[glyph2] g1.draw(pen1) g2.draw(pen2) if pen1.value != pen2.value: return False # Allow more width tolerance for glyphs with no ink tolerance = advanceTolerance if pen1.value else advanceToleranceEmpty # TODO Warn if advances not the same but within tolerance. if abs(g1.width - g2.width) > g1.width * tolerance: return False if hasattr(g1, "height") and g1.height is not None: if abs(g1.height - g2.height) > g1.height * tolerance: return False return True # Valid (format, platformID, platEncID) triplets for cmap subtables containing # Unicode BMP-only and Unicode Full Repertoire semantics. # Cf. OpenType spec for "Platform specific encodings": # https://docs.microsoft.com/en-us/typography/opentype/spec/name class _CmapUnicodePlatEncodings: BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)} FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)} def computeMegaCmap(merger, cmapTables): """Sets merger.cmap and merger.glyphOrder.""" # TODO Handle format=14. # Only merge format 4 and 12 Unicode subtables, ignores all other subtables # If there is a format 12 table for a font, ignore the format 4 table of it chosenCmapTables = [] for fontIdx, table in enumerate(cmapTables): format4 = None format12 = None for subtable in table.tables: properties = (subtable.format, subtable.platformID, subtable.platEncID) if properties in _CmapUnicodePlatEncodings.BMP: format4 = subtable elif properties in _CmapUnicodePlatEncodings.FullRepertoire: format12 = subtable else: log.warning( "Dropped cmap subtable from font '%s':\t" "format %2s, platformID %2s, platEncID %2s", fontIdx, subtable.format, subtable.platformID, subtable.platEncID, ) if format12 is not None: chosenCmapTables.append((format12, fontIdx)) elif format4 is not None: chosenCmapTables.append((format4, fontIdx)) # Build the unicode mapping merger.cmap = cmap = {} fontIndexForGlyph = {} glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None for table, fontIdx in chosenCmapTables: # handle duplicates for uni, gid in table.cmap.items(): oldgid = cmap.get(uni, None) if oldgid is None: cmap[uni] = gid fontIndexForGlyph[gid] = fontIdx elif is_Default_Ignorable(uni) or uni in (0x25CC,): # U+25CC DOTTED CIRCLE continue elif oldgid != gid: # Char previously mapped to oldgid, now to gid. # Record, to fix up in GSUB 'locl' later. if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None: if glyphSets is not None: oldFontIdx = fontIndexForGlyph[oldgid] for idx in (fontIdx, oldFontIdx): if glyphSets[idx] is None: glyphSets[idx] = merger.fonts[idx].getGlyphSet() # if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid): # continue merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid: # Char previously mapped to oldgid but oldgid is already remapped to a different # gid, because of another Unicode character. # TODO: Try harder to do something about these. 
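# computeMegaGlyphOrder (above) resolves glyph-name clashes by appending
# ".1", ".2", ... to later occurrences, rewriting the per-font glyph orders
# in place. The renaming rule, restated standalone:
def mega_glyph_order(glyph_orders):
    mega = {}
    for order in glyph_orders:
        for i, name in enumerate(order):
            if name in mega:
                n = mega[name]
                while (name + "." + repr(n)) in mega:
                    n += 1
                mega[name] = n
                name += "." + repr(n)
                order[i] = name
            mega[name] = 1
    return list(mega.keys())

orders = [["a", "b"], ["a", "c"]]
print(mega_glyph_order(orders))  # ['a', 'b', 'a.1', 'c']
print(orders[1])                 # ['a.1', 'c'], renamed in place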
log.warning( "Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid ) def renameCFFCharStrings(merger, glyphOrder, cffTable): """Rename topDictIndex charStrings based on glyphOrder.""" td = cffTable.cff.topDictIndex[0] charStrings = {} for i, v in enumerate(td.CharStrings.charStrings.values()): glyphName = glyphOrder[i] charStrings[glyphName] = v td.CharStrings.charStrings = charStrings td.charset = list(glyphOrder)

fontTools/merge/layout.py
# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod, Roozbeh Pournader from fontTools import ttLib from fontTools.ttLib.tables.DefaultTable import DefaultTable from fontTools.ttLib.tables import otTables from fontTools.merge.base import add_method, mergeObjects from fontTools.merge.util import * import logging log = logging.getLogger("fontTools.merge") def mergeLookupLists(lst): # TODO Do smarter merge. return sumLists(lst) def mergeFeatures(lst): assert lst self = otTables.Feature() self.FeatureParams = None self.LookupListIndex = mergeLookupLists( [l.LookupListIndex for l in lst if l.LookupListIndex] ) self.LookupCount = len(self.LookupListIndex) return self def mergeFeatureLists(lst): d = {} for l in lst: for f in l: tag = f.FeatureTag if tag not in d: d[tag] = [] d[tag].append(f.Feature) ret = [] for tag in sorted(d.keys()): rec = otTables.FeatureRecord() rec.FeatureTag = tag rec.Feature = mergeFeatures(d[tag]) ret.append(rec) return ret def mergeLangSyses(lst): assert lst # TODO Support merging ReqFeatureIndex assert all(l.ReqFeatureIndex == 0xFFFF for l in lst) self = otTables.LangSys() self.LookupOrder = None self.ReqFeatureIndex = 0xFFFF self.FeatureIndex = mergeFeatureLists( [l.FeatureIndex for l in lst if l.FeatureIndex] ) self.FeatureCount = len(self.FeatureIndex) return self def mergeScripts(lst): assert lst if len(lst) == 1: return lst[0] langSyses = {} for sr in lst: for lsr in sr.LangSysRecord: if lsr.LangSysTag not in langSyses: langSyses[lsr.LangSysTag] = [] langSyses[lsr.LangSysTag].append(lsr.LangSys) lsrecords = [] for tag, langSys_list in sorted(langSyses.items()): lsr = otTables.LangSysRecord() lsr.LangSys = mergeLangSyses(langSys_list) lsr.LangSysTag = tag lsrecords.append(lsr) self = otTables.Script() self.LangSysRecord = lsrecords self.LangSysCount = len(lsrecords) dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys] if dfltLangSyses: self.DefaultLangSys = mergeLangSyses(dfltLangSyses) else: self.DefaultLangSys = None return self def mergeScriptRecords(lst): d = {} for l in lst: for s in l: tag = s.ScriptTag if tag not in d: d[tag] = [] d[tag].append(s.Script) ret = [] for tag in sorted(d.keys()): rec = otTables.ScriptRecord() rec.ScriptTag = tag rec.Script = mergeScripts(d[tag]) ret.append(rec) return ret otTables.ScriptList.mergeMap = { "ScriptCount": lambda lst: None, # TODO "ScriptRecord": mergeScriptRecords, } otTables.BaseScriptList.mergeMap = { "BaseScriptCount": lambda lst: None, # TODO # TODO: Merge duplicate entries "BaseScriptRecord": lambda lst: sorted( sumLists(lst), key=lambda s: s.BaseScriptTag ), } otTables.FeatureList.mergeMap = { "FeatureCount": sum, "FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag), } otTables.LookupList.mergeMap = { "LookupCount": sum, "Lookup": sumLists, } otTables.Coverage.mergeMap = { "Format": min, "glyphs": sumLists, } otTables.ClassDef.mergeMap = { "Format": min, "classDefs": sumDicts, } otTables.LigCaretList.mergeMap = { "Coverage": mergeObjects, "LigGlyphCount": sum, "LigGlyph":
sumLists, } otTables.AttachList.mergeMap = { "Coverage": mergeObjects, "GlyphCount": sum, "AttachPoint": sumLists, } # XXX Renumber MarkFilterSets of lookups otTables.MarkGlyphSetsDef.mergeMap = { "MarkSetTableFormat": equal, "MarkSetCount": sum, "Coverage": sumLists, } otTables.Axis.mergeMap = { "*": mergeObjects, } # XXX Fix BASE table merging otTables.BaseTagList.mergeMap = { "BaseTagCount": sum, "BaselineTag": sumLists, } otTables.GDEF.mergeMap = otTables.GSUB.mergeMap = otTables.GPOS.mergeMap = ( otTables.BASE.mergeMap ) = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = { "*": mergeObjects, "Version": max, } ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass("GSUB").mergeMap = ( ttLib.getTableClass("GPOS").mergeMap ) = ttLib.getTableClass("BASE").mergeMap = ttLib.getTableClass( "JSTF" ).mergeMap = ttLib.getTableClass( "MATH" ).mergeMap = { "tableTag": onlyExisting(equal), # XXX clean me up "table": mergeObjects, } @add_method(ttLib.getTableClass("GSUB")) def merge(self, m, tables): assert len(tables) == len(m.duplicateGlyphsPerFont) for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): if not dups: continue if table is None or table is NotImplemented: log.warning( "Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups, ) continue synthFeature = None synthLookup = None for script in table.table.ScriptList.ScriptRecord: if script.ScriptTag == "DFLT": continue # XXX for langsys in [script.Script.DefaultLangSys] + [ l.LangSys for l in script.Script.LangSysRecord ]: if langsys is None: continue # XXX Create! feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"] assert len(feature) <= 1 if feature: feature = feature[0] else: if not synthFeature: synthFeature = otTables.FeatureRecord() synthFeature.FeatureTag = "locl" f = synthFeature.Feature = otTables.Feature() f.FeatureParams = None f.LookupCount = 0 f.LookupListIndex = [] table.table.FeatureList.FeatureRecord.append(synthFeature) table.table.FeatureList.FeatureCount += 1 feature = synthFeature langsys.FeatureIndex.append(feature) langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag) if not synthLookup: subtable = otTables.SingleSubst() subtable.mapping = dups synthLookup = otTables.Lookup() synthLookup.LookupFlag = 0 synthLookup.LookupType = 1 synthLookup.SubTableCount = 1 synthLookup.SubTable = [subtable] if table.table.LookupList is None: # mtiLib uses None as default value for LookupList, # while feaLib points to an empty array with count 0 # TODO: make them do the same table.table.LookupList = otTables.LookupList() table.table.LookupList.Lookup = [] table.table.LookupList.LookupCount = 0 table.table.LookupList.Lookup.append(synthLookup) table.table.LookupList.LookupCount += 1 if feature.Feature.LookupListIndex[:1] != [synthLookup]: feature.Feature.LookupListIndex[:0] = [synthLookup] feature.Feature.LookupCount += 1 DefaultTable.merge(self, m, tables) return self @add_method( otTables.SingleSubst, otTables.MultipleSubst, otTables.AlternateSubst, otTables.LigatureSubst, otTables.ReverseChainSingleSubst, otTables.SinglePos, otTables.PairPos, otTables.CursivePos, otTables.MarkBasePos, otTables.MarkLigPos, otTables.MarkMarkPos, ) def mapLookups(self, lookupMap): pass # Copied and trimmed down from subset.py @add_method( otTables.ContextSubst, otTables.ChainContextSubst, otTables.ContextPos, otTables.ChainContextPos, ) def __merge_classify_context(self): class ContextHelper(object): def __init__(self, klass, Format): if 
klass.__name__.endswith("Subst"): Typ = "Sub" Type = "Subst" else: Typ = "Pos" Type = "Pos" if klass.__name__.startswith("Chain"): Chain = "Chain" else: Chain = "" ChainTyp = Chain + Typ self.Typ = Typ self.Type = Type self.Chain = Chain self.ChainTyp = ChainTyp self.LookupRecord = Type + "LookupRecord" if Format == 1: self.Rule = ChainTyp + "Rule" self.RuleSet = ChainTyp + "RuleSet" elif Format == 2: self.Rule = ChainTyp + "ClassRule" self.RuleSet = ChainTyp + "ClassSet" if self.Format not in [1, 2, 3]: return None # Don't shoot the messenger; let it go if not hasattr(self.__class__, "_merge__ContextHelpers"): self.__class__._merge__ContextHelpers = {} if self.Format not in self.__class__._merge__ContextHelpers: helper = ContextHelper(self.__class__, self.Format) self.__class__._merge__ContextHelpers[self.Format] = helper return self.__class__._merge__ContextHelpers[self.Format] @add_method( otTables.ContextSubst, otTables.ChainContextSubst, otTables.ContextPos, otTables.ChainContextPos, ) def mapLookups(self, lookupMap): c = self.__merge_classify_context() if self.Format in [1, 2]: for rs in getattr(self, c.RuleSet): if not rs: continue for r in getattr(rs, c.Rule): if not r: continue for ll in getattr(r, c.LookupRecord): if not ll: continue ll.LookupListIndex = lookupMap[ll.LookupListIndex] elif self.Format == 3: for ll in getattr(self, c.LookupRecord): if not ll: continue ll.LookupListIndex = lookupMap[ll.LookupListIndex] else: assert 0, "unknown format: %s" % self.Format @add_method(otTables.ExtensionSubst, otTables.ExtensionPos) def mapLookups(self, lookupMap): if self.Format == 1: self.ExtSubTable.mapLookups(lookupMap) else: assert 0, "unknown format: %s" % self.Format @add_method(otTables.Lookup) def mapLookups(self, lookupMap): for st in self.SubTable: if not st: continue st.mapLookups(lookupMap) @add_method(otTables.LookupList) def mapLookups(self, lookupMap): for l in self.Lookup: if not l: continue l.mapLookups(lookupMap) @add_method(otTables.Lookup) def mapMarkFilteringSets(self, markFilteringSetMap): if self.LookupFlag & 0x0010: self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet] @add_method(otTables.LookupList) def mapMarkFilteringSets(self, markFilteringSetMap): for l in self.Lookup: if not l: continue l.mapMarkFilteringSets(markFilteringSetMap) @add_method(otTables.Feature) def mapLookups(self, lookupMap): self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex] @add_method(otTables.FeatureList) def mapLookups(self, lookupMap): for f in self.FeatureRecord: if not f or not f.Feature: continue f.Feature.mapLookups(lookupMap) @add_method(otTables.DefaultLangSys, otTables.LangSys) def mapFeatures(self, featureMap): self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex] if self.ReqFeatureIndex != 65535: self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex] @add_method(otTables.Script) def mapFeatures(self, featureMap): if self.DefaultLangSys: self.DefaultLangSys.mapFeatures(featureMap) for l in self.LangSysRecord: if not l or not l.LangSys: continue l.LangSys.mapFeatures(featureMap) @add_method(otTables.ScriptList) def mapFeatures(self, featureMap): for s in self.ScriptRecord: if not s or not s.Script: continue s.Script.mapFeatures(featureMap) def layoutPreMerge(font): # Map indices to references GDEF = font.get("GDEF") GSUB = font.get("GSUB") GPOS = font.get("GPOS") for t in [GSUB, GPOS]: if not t: continue if t.table.LookupList: lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)} t.table.LookupList.mapLookups(lookupMap) 
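# The same index -> Lookup-object map is applied to the FeatureList next, so
# that features hold direct object references instead of list indices while
# tables from different fonts are merged; layoutPostMerge() below converts
# the references back to freshly assigned indices.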
t.table.FeatureList.mapLookups(lookupMap) if ( GDEF and GDEF.table.Version >= 0x00010002 and GDEF.table.MarkGlyphSetsDef ): markFilteringSetMap = { i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage) } t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap) if t.table.FeatureList and t.table.ScriptList: featureMap = {i: v for i, v in enumerate(t.table.FeatureList.FeatureRecord)} t.table.ScriptList.mapFeatures(featureMap) # TODO FeatureParams nameIDs def layoutPostMerge(font): # Map references back to indices GDEF = font.get("GDEF") GSUB = font.get("GSUB") GPOS = font.get("GPOS") for t in [GSUB, GPOS]: if not t: continue if t.table.FeatureList and t.table.ScriptList: # Collect unregistered (new) features. featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord) t.table.ScriptList.mapFeatures(featureMap) # Record used features. featureMap = AttendanceRecordingIdentityDict( t.table.FeatureList.FeatureRecord ) t.table.ScriptList.mapFeatures(featureMap) usedIndices = featureMap.s # Remove unused features t.table.FeatureList.FeatureRecord = [ f for i, f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices ] # Map back to indices. featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord) t.table.ScriptList.mapFeatures(featureMap) t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord) if t.table.LookupList: # Collect unregistered (new) lookups. lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup) t.table.FeatureList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap) # Record used lookups. lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup) t.table.FeatureList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap) usedIndices = lookupMap.s # Remove unused lookups t.table.LookupList.Lookup = [ l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices ] # Map back to indices. lookupMap = NonhashableDict(t.table.LookupList.Lookup) t.table.FeatureList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap) t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup) if GDEF and GDEF.table.Version >= 0x00010002: markFilteringSetMap = NonhashableDict( GDEF.table.MarkGlyphSetsDef.Coverage ) t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap) # TODO FeatureParams nameIDs

fontTools/merge/options.py
# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod, Roozbeh Pournader class Options(object): class UnknownOptionError(Exception): pass def __init__(self, **kwargs): self.verbose = False self.timing = False self.drop_tables = [] self.input_file = None self.output_file = "merged.ttf" self.import_file = None self.set(**kwargs) def set(self, **kwargs): for k, v in kwargs.items(): if not hasattr(self, k): raise self.UnknownOptionError("Unknown option '%s'" % k) setattr(self, k, v) def parse_opts(self, argv, ignore_unknown=[]): ret = [] opts = {} for a in argv: orig_a = a if not a.startswith("--"): ret.append(a) continue a = a[2:] i = a.find("=") op = "=" if i == -1: if a.startswith("no-"): k = a[3:] v = False else: k = a v = True else: k = a[:i] if k[-1] in "-+": op = k[-1] + "=" # Op is now '-=' or '+='.
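# Illustrative (hypothetical) call, not part of the original source:
# Options().parse_opts(["--drop-tables+=BASE,JSTF", "in.ttf"]) returns
# ["in.ttf"] as leftover positional arguments and extends self.drop_tables
# with ["BASE", "JSTF"] via the '+=' branch handled below.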
k = k[:-1] v = a[i + 1 :] ok = k k = k.replace("-", "_") if not hasattr(self, k): if ignore_unknown is True or ok in ignore_unknown: ret.append(orig_a) continue else: raise self.UnknownOptionError("Unknown option '%s'" % a) ov = getattr(self, k) if isinstance(ov, bool): v = bool(v) elif isinstance(ov, int): v = int(v) elif isinstance(ov, list): vv = v.split(",") if vv == [""]: vv = [] vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] if op == "=": v = vv elif op == "+=": v = ov v.extend(vv) elif op == "-=": v = ov for x in vv: if x in v: v.remove(x) else: assert 0 opts[k] = v self.set(**opts) return ret

fontTools/merge/tables.py
# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod, Roozbeh Pournader from fontTools import ttLib, cffLib from fontTools.misc.psCharStrings import T2WidthExtractor from fontTools.ttLib.tables.DefaultTable import DefaultTable from fontTools.merge.base import add_method, mergeObjects from fontTools.merge.cmap import computeMegaCmap from fontTools.merge.util import * import logging log = logging.getLogger("fontTools.merge") ttLib.getTableClass("maxp").mergeMap = { "*": max, "tableTag": equal, "tableVersion": equal, "numGlyphs": sum, "maxStorage": first, "maxFunctionDefs": first, "maxInstructionDefs": first, # TODO When we correctly merge hinting data, update these values: # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions } headFlagsMergeBitMap = { "size": 16, "*": bitwise_or, 1: bitwise_and, # Baseline at y = 0 2: bitwise_and, # lsb at x = 0 3: bitwise_and, # Force ppem to integer values. FIXME? 5: bitwise_and, # Font is vertical 6: lambda bit: 0, # Always set to zero 11: bitwise_and, # Font data is 'lossless' 13: bitwise_and, # Optimized for ClearType 14: bitwise_and, # Last resort font. FIXME?
equal or first may be better 15: lambda bit: 0, # Always set to zero } ttLib.getTableClass("head").mergeMap = { "tableTag": equal, "tableVersion": max, "fontRevision": max, "checkSumAdjustment": lambda lst: 0, # We need *something* here "magicNumber": equal, "flags": mergeBits(headFlagsMergeBitMap), "unitsPerEm": equal, "created": current_time, "modified": current_time, "xMin": min, "yMin": min, "xMax": max, "yMax": max, "macStyle": first, "lowestRecPPEM": max, "fontDirectionHint": lambda lst: 2, "indexToLocFormat": first, "glyphDataFormat": equal, } ttLib.getTableClass("hhea").mergeMap = { "*": equal, "tableTag": equal, "tableVersion": max, "ascent": max, "descent": min, "lineGap": max, "advanceWidthMax": max, "minLeftSideBearing": min, "minRightSideBearing": min, "xMaxExtent": max, "caretSlopeRise": first, "caretSlopeRun": first, "caretOffset": first, "numberOfHMetrics": recalculate, } ttLib.getTableClass("vhea").mergeMap = { "*": equal, "tableTag": equal, "tableVersion": max, "ascent": max, "descent": min, "lineGap": max, "advanceHeightMax": max, "minTopSideBearing": min, "minBottomSideBearing": min, "yMaxExtent": max, "caretSlopeRise": first, "caretSlopeRun": first, "caretOffset": first, "numberOfVMetrics": recalculate, } os2FsTypeMergeBitMap = { "size": 16, "*": lambda bit: 0, 1: bitwise_or, # no embedding permitted 2: bitwise_and, # allow previewing and printing documents 3: bitwise_and, # allow editing documents 8: bitwise_or, # no subsetting permitted 9: bitwise_or, # no embedding of outlines permitted } def mergeOs2FsType(lst): lst = list(lst) if all(item == 0 for item in lst): return 0 # Compute least restrictive logic for each fsType value for i in range(len(lst)): # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set if lst[i] & 0x000C: lst[i] &= ~0x0002 # set bit 2 (allow previewing) if bit 3 is set (allow editing) elif lst[i] & 0x0008: lst[i] |= 0x0004 # set bits 2 and 3 if everything is allowed elif lst[i] == 0: lst[i] = 0x000C fsType = mergeBits(os2FsTypeMergeBitMap)(lst) # unset bits 2 and 3 if bit 1 is set (some font is "no embedding") if fsType & 0x0002: fsType &= ~0x000C return fsType ttLib.getTableClass("OS/2").mergeMap = { "*": first, "tableTag": equal, "version": max, "xAvgCharWidth": first, # Will be recalculated at the end on the merged font "fsType": mergeOs2FsType, # Will be overwritten "panose": first, # FIXME: should really be the first Latin font "ulUnicodeRange1": bitwise_or, "ulUnicodeRange2": bitwise_or, "ulUnicodeRange3": bitwise_or, "ulUnicodeRange4": bitwise_or, "fsFirstCharIndex": min, "fsLastCharIndex": max, "sTypoAscender": max, "sTypoDescender": min, "sTypoLineGap": max, "usWinAscent": max, "usWinDescent": max, # Version 1 "ulCodePageRange1": onlyExisting(bitwise_or), "ulCodePageRange2": onlyExisting(bitwise_or), # Version 2, 3, 4 "sxHeight": onlyExisting(max), "sCapHeight": onlyExisting(max), "usDefaultChar": onlyExisting(first), "usBreakChar": onlyExisting(first), "usMaxContext": onlyExisting(max), # version 5 "usLowerOpticalPointSize": onlyExisting(min), "usUpperOpticalPointSize": onlyExisting(max), } @add_method(ttLib.getTableClass("OS/2")) def merge(self, m, tables): DefaultTable.merge(self, m, tables) if self.version < 2: # bits 8 and 9 are reserved and should be set to zero self.fsType &= ~0x0300 if self.version >= 3: # Only one of bits 1, 2, and 3 may be set. We already take # care of bit 1 implications in mergeOs2FsType. So unset # bit 2 if bit 3 is already set. 
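# Bit 1 is 0x0002 (restricted embedding), bit 2 is 0x0004 (preview & print),
# bit 3 is 0x0008 (editable embedding); keep only the most permissive of
# bits 2 and 3 here.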
if self.fsType & 0x0008: self.fsType &= ~0x0004 return self ttLib.getTableClass("post").mergeMap = { "*": first, "tableTag": equal, "formatType": max, "isFixedPitch": min, "minMemType42": max, "maxMemType42": lambda lst: 0, "minMemType1": max, "maxMemType1": lambda lst: 0, "mapping": onlyExisting(sumDicts), "extraNames": lambda lst: [], } ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = { "tableTag": equal, "metrics": sumDicts, } ttLib.getTableClass("name").mergeMap = { "tableTag": equal, "names": first, # FIXME? Does mixing name records make sense? } ttLib.getTableClass("loca").mergeMap = { "*": recalculate, "tableTag": equal, } ttLib.getTableClass("glyf").mergeMap = { "tableTag": equal, "glyphs": sumDicts, "glyphOrder": sumLists, "_reverseGlyphOrder": recalculate, "axisTags": equal, } @add_method(ttLib.getTableClass("glyf")) def merge(self, m, tables): for i, table in enumerate(tables): for g in table.glyphs.values(): if i: # Drop hints for all but first font, since # we don't map functions / CVT values. g.removeHinting() # Expand composite glyphs to load their # composite glyph names. if g.isComposite() or g.isVarComposite(): g.expand(table) return DefaultTable.merge(self, m, tables) ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst) ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst) ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst) ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first( lst ) # FIXME? Appears irreconcilable @add_method(ttLib.getTableClass("CFF ")) def merge(self, m, tables): if any(hasattr(table.cff[0], "FDSelect") for table in tables): raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet") for table in tables: table.cff.desubroutinize() newcff = tables[0] newfont = newcff.cff[0] private = newfont.Private newDefaultWidthX, newNominalWidthX = private.defaultWidthX, private.nominalWidthX storedNamesStrings = [] glyphOrderStrings = [] glyphOrder = set(newfont.getGlyphOrder()) for name in newfont.strings.strings: if name not in glyphOrder: storedNamesStrings.append(name) else: glyphOrderStrings.append(name) chrset = list(newfont.charset) newcs = newfont.CharStrings log.debug("FONT 0 CharStrings: %d.", len(newcs)) for i, table in enumerate(tables[1:], start=1): font = table.cff[0] defaultWidthX, nominalWidthX = ( font.Private.defaultWidthX, font.Private.nominalWidthX, ) widthsDiffer = ( defaultWidthX != newDefaultWidthX or nominalWidthX != newNominalWidthX ) font.Private = private fontGlyphOrder = set(font.getGlyphOrder()) for name in font.strings.strings: if name in fontGlyphOrder: glyphOrderStrings.append(name) cs = font.CharStrings gs = table.cff.GlobalSubrs log.debug("Font %d CharStrings: %d.", i, len(cs)) chrset.extend(font.charset) if newcs.charStringsAreIndexed: for i, name in enumerate(cs.charStrings, start=len(newcs)): newcs.charStrings[name] = i newcs.charStringsIndex.items.append(None) for name in cs.charStrings: if widthsDiffer: c = cs[name] defaultWidthXToken = object() extractor = T2WidthExtractor([], [], nominalWidthX, defaultWidthXToken) extractor.execute(c) width = extractor.width if width is not defaultWidthXToken: c.program.pop(0) else: width = defaultWidthX if width != newDefaultWidthX: c.program.insert(0, width - newNominalWidthX) newcs[name] = cs[name] newfont.charset = chrset newfont.numGlyphs = len(chrset) newfont.strings.strings = glyphOrderStrings + storedNamesStrings return newcff @add_method(ttLib.getTableClass("cmap")) def merge(self, 
m, tables): # TODO Handle format=14. if not hasattr(m, "cmap"): computeMegaCmap(m, tables) cmap = m.cmap cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF} self.tables = [] module = ttLib.getTableModule("cmap") if len(cmapBmpOnly) != len(cmap): # format-12 required. cmapTable = module.cmap_classes[12](12) cmapTable.platformID = 3 cmapTable.platEncID = 10 cmapTable.language = 0 cmapTable.cmap = cmap self.tables.append(cmapTable) # always create format-4 cmapTable = module.cmap_classes[4](4) cmapTable.platformID = 3 cmapTable.platEncID = 1 cmapTable.language = 0 cmapTable.cmap = cmapBmpOnly # ordered by platform then encoding self.tables.insert(0, cmapTable) self.tableVersion = 0 self.numSubTables = len(self.tables) return self

fontTools/merge/unicode.py
# Copyright 2021 Behdad Esfahbod. All Rights Reserved. def is_Default_Ignorable(u): # http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point # # TODO Move me to unicodedata module and autogenerate. # # Unicode 14.0: # $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/' # 00AD # Cf SOFT HYPHEN # 034F # Mn COMBINING GRAPHEME JOINER # 061C # Cf ARABIC LETTER MARK # 115F..1160 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER # 17B4..17B5 # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA # 180B..180D # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE # 180E # Cf MONGOLIAN VOWEL SEPARATOR # 180F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR # 200B..200F # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK # 202A..202E # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE # 2060..2064 # Cf [5] WORD JOINER..INVISIBLE PLUS # 2065 # Cn <reserved-2065> # 2066..206F # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES # 3164 # Lo HANGUL FILLER # FE00..FE0F # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16 # FEFF # Cf ZERO WIDTH NO-BREAK SPACE # FFA0 # Lo HALFWIDTH HANGUL FILLER # FFF0..FFF8 # Cn [9] <reserved-FFF0>..<reserved-FFF8> # 1BCA0..1BCA3 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP # 1D173..1D17A # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE # E0000 # Cn <reserved-E0000> # E0001 # Cf LANGUAGE TAG # E0002..E001F # Cn [30] <reserved-E0002>..<reserved-E001F> # E0020..E007F # Cf [96] TAG SPACE..CANCEL TAG # E0080..E00FF # Cn [128] <reserved-E0080>..<reserved-E00FF> # E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 # E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
    return (
        u == 0x00AD  # Cf SOFT HYPHEN
        or u == 0x034F  # Mn COMBINING GRAPHEME JOINER
        or u == 0x061C  # Cf ARABIC LETTER MARK
        or 0x115F <= u <= 0x1160  # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
        or 0x17B4 <= u <= 0x17B5  # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
        or 0x180B <= u <= 0x180D  # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
        or u == 0x180E  # Cf MONGOLIAN VOWEL SEPARATOR
        or u == 0x180F  # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
        or 0x200B <= u <= 0x200F  # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
        or 0x202A <= u <= 0x202E  # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
        or 0x2060 <= u <= 0x2064  # Cf [5] WORD JOINER..INVISIBLE PLUS
        or u == 0x2065  # Cn <reserved-2065>
        or 0x2066 <= u <= 0x206F  # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
        or u == 0x3164  # Lo HANGUL FILLER
        or 0xFE00 <= u <= 0xFE0F  # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
        or u == 0xFEFF  # Cf ZERO WIDTH NO-BREAK SPACE
        or u == 0xFFA0  # Lo HALFWIDTH HANGUL FILLER
        or 0xFFF0 <= u <= 0xFFF8  # Cn [9] <reserved-FFF0>..<reserved-FFF8>
        or 0x1BCA0 <= u <= 0x1BCA3  # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
        or 0x1D173 <= u <= 0x1D17A  # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
        or u == 0xE0000  # Cn <reserved-E0000>
        or u == 0xE0001  # Cf LANGUAGE TAG
        or 0xE0002 <= u <= 0xE001F  # Cn [30] <reserved-E0002>..<reserved-E001F>
        or 0xE0020 <= u <= 0xE007F  # Cf [96] TAG SPACE..CANCEL TAG
        or 0xE0080 <= u <= 0xE00FF  # Cn [128] <reserved-E0080>..<reserved-E00FF>
        or 0xE0100 <= u <= 0xE01EF  # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
        or 0xE01F0 <= u <= 0xE0FFF  # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
    )

fontTools/merge/util.py
# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod, Roozbeh Pournader from fontTools.misc.timeTools import timestampNow from fontTools.ttLib.tables.DefaultTable import DefaultTable from functools import reduce import operator import logging log = logging.getLogger("fontTools.merge") # General utility functions for merging values from different fonts def equal(lst): lst = list(lst) t = iter(lst) first = next(t) assert all(item == first for item in t), "Expected all items to be equal: %s" % lst return first def first(lst): return next(iter(lst)) def recalculate(lst): return NotImplemented def current_time(lst): return timestampNow() def bitwise_and(lst): return reduce(operator.and_, lst) def bitwise_or(lst): return reduce(operator.or_, lst) def avg_int(lst): lst = list(lst) return sum(lst) // len(lst) def onlyExisting(func): """Returns a filter func that when called with a list, only calls func on the non-NotImplemented items of the list, and only so if there's at least one item remaining.
Otherwise returns NotImplemented.""" def wrapper(lst): items = [item for item in lst if item is not NotImplemented] return func(items) if items else NotImplemented return wrapper def sumLists(lst): l = [] for item in lst: l.extend(item) return l def sumDicts(lst): d = {} for item in lst: d.update(item) return d def mergeBits(bitmap): def wrapper(lst): lst = list(lst) returnValue = 0 for bitNumber in range(bitmap["size"]): try: mergeLogic = bitmap[bitNumber] except KeyError: try: mergeLogic = bitmap["*"] except KeyError: raise Exception("Don't know how to merge bit %s" % bitNumber) shiftedBit = 1 << bitNumber mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst) returnValue |= mergedValue << bitNumber return returnValue return wrapper class AttendanceRecordingIdentityDict(object): """A dictionary-like object that records indices of items actually accessed from a list.""" def __init__(self, lst): self.l = lst self.d = {id(v): i for i, v in enumerate(lst)} self.s = set() def __getitem__(self, v): self.s.add(self.d[id(v)]) return v class GregariousIdentityDict(object): """A dictionary-like object that welcomes guests without reservations and adds them to the end of the guest list.""" def __init__(self, lst): self.l = lst self.s = set(id(v) for v in lst) def __getitem__(self, v): if id(v) not in self.s: self.s.add(id(v)) self.l.append(v) return v class NonhashableDict(object): """A dictionary-like object mapping objects to values.""" def __init__(self, keys, values=None): if values is None: self.d = {id(v): i for i, v in enumerate(keys)} else: self.d = {id(k): v for k, v in zip(keys, values)} def __getitem__(self, k): return self.d[id(k)] def __setitem__(self, k, v): self.d[id(k)] = v def __delitem__(self, k): del self.d[id(k)]

fontTools/misc/__init__.py
"""Empty __init__.py file to signal Python this directory is a package."""

fontTools/misc/arrayTools.py
"""Routines for calculating bounding boxes, point in rectangle calculations and so on. """ from fontTools.misc.roundTools import otRound from fontTools.misc.vector import Vector as _Vector import math import warnings def calcBounds(array): """Calculate the bounding rectangle of a 2D points array. Args: array: A sequence of 2D tuples. Returns: A four-item tuple representing the bounding rectangle ``(xMin, yMin, xMax, yMax)``. """ if not array: return 0, 0, 0, 0 xs = [x for x, y in array] ys = [y for x, y in array] return min(xs), min(ys), max(xs), max(ys) def calcIntBounds(array, round=otRound): """Calculate the integer bounding rectangle of a 2D points array. Values are rounded to closest integer towards ``+Infinity`` using the :func:`fontTools.misc.fixedTools.otRound` function by default, unless an optional ``round`` function is passed. Args: array: A sequence of 2D tuples. round: A rounding function of type ``f(x: float) -> int``. Returns: A four-item tuple of integers representing the bounding rectangle: ``(xMin, yMin, xMax, yMax)``. """ return tuple(round(v) for v in calcBounds(array)) def updateBounds(bounds, p, min=min, max=max): """Add a point to a bounding rectangle. Args: bounds: A bounding rectangle expressed as a tuple ``(xMin, yMin, xMax, yMax)``, or None. p: A 2D tuple representing a point. min,max: functions to compute the minimum and maximum. Returns: The updated bounding rectangle ``(xMin, yMin, xMax, yMax)``.
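Example (mirrors the doctest in this module's ``_test`` function)::

    >>> updateBounds((0, 0, 0, 0), (100, 100))
    (0, 0, 100, 100)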
""" (x, y) = p if bounds is None: return x, y, x, y xMin, yMin, xMax, yMax = bounds return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y) def pointInRect(p, rect): """Test if a point is inside a bounding rectangle. Args: p: A 2D tuple representing a point. rect: A bounding rectangle expressed as a tuple ``(xMin, yMin, xMax, yMax)``. Returns: ``True`` if the point is inside the rectangle, ``False`` otherwise. """ (x, y) = p xMin, yMin, xMax, yMax = rect return (xMin <= x <= xMax) and (yMin <= y <= yMax) def pointsInRect(array, rect): """Determine which points are inside a bounding rectangle. Args: array: A sequence of 2D tuples. rect: A bounding rectangle expressed as a tuple ``(xMin, yMin, xMax, yMax)``. Returns: A list containing the points inside the rectangle. """ if len(array) < 1: return [] xMin, yMin, xMax, yMax = rect return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array] def vectorLength(vector): """Calculate the length of the given vector. Args: vector: A 2D tuple. Returns: The Euclidean length of the vector. """ x, y = vector return math.sqrt(x**2 + y**2) def asInt16(array): """Round a list of floats to 16-bit signed integers. Args: array: List of float values. Returns: A list of rounded integers. """ return [int(math.floor(i + 0.5)) for i in array] def normRect(rect): """Normalize a bounding box rectangle. This function "turns the rectangle the right way up", so that the following holds:: xMin <= xMax and yMin <= yMax Args: rect: A bounding rectangle expressed as a tuple ``(xMin, yMin, xMax, yMax)``. Returns: A normalized bounding rectangle. """ (xMin, yMin, xMax, yMax) = rect return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax) def scaleRect(rect, x, y): """Scale a bounding box rectangle. Args: rect: A bounding rectangle expressed as a tuple ``(xMin, yMin, xMax, yMax)``. x: Factor to scale the rectangle along the X axis. Y: Factor to scale the rectangle along the Y axis. Returns: A scaled bounding rectangle. """ (xMin, yMin, xMax, yMax) = rect return xMin * x, yMin * y, xMax * x, yMax * y def offsetRect(rect, dx, dy): """Offset a bounding box rectangle. Args: rect: A bounding rectangle expressed as a tuple ``(xMin, yMin, xMax, yMax)``. dx: Amount to offset the rectangle along the X axis. dY: Amount to offset the rectangle along the Y axis. Returns: An offset bounding rectangle. """ (xMin, yMin, xMax, yMax) = rect return xMin + dx, yMin + dy, xMax + dx, yMax + dy def insetRect(rect, dx, dy): """Inset a bounding box rectangle on all sides. Args: rect: A bounding rectangle expressed as a tuple ``(xMin, yMin, xMax, yMax)``. dx: Amount to inset the rectangle along the X axis. dY: Amount to inset the rectangle along the Y axis. Returns: An inset bounding rectangle. """ (xMin, yMin, xMax, yMax) = rect return xMin + dx, yMin + dy, xMax - dx, yMax - dy def sectRect(rect1, rect2): """Test for rectangle-rectangle intersection. Args: rect1: First bounding rectangle, expressed as tuples ``(xMin, yMin, xMax, yMax)``. rect2: Second bounding rectangle. Returns: A boolean and a rectangle. If the input rectangles intersect, returns ``True`` and the intersecting rectangle. Returns ``False`` and ``(0, 0, 0, 0)`` if the input rectangles don't intersect. 
""" (xMin1, yMin1, xMax1, yMax1) = rect1 (xMin2, yMin2, xMax2, yMax2) = rect2 xMin, yMin, xMax, yMax = ( max(xMin1, xMin2), max(yMin1, yMin2), min(xMax1, xMax2), min(yMax1, yMax2), ) if xMin >= xMax or yMin >= yMax: return False, (0, 0, 0, 0) return True, (xMin, yMin, xMax, yMax) def unionRect(rect1, rect2): """Determine union of bounding rectangles. Args: rect1: First bounding rectangle, expressed as tuples ``(xMin, yMin, xMax, yMax)``. rect2: Second bounding rectangle. Returns: The smallest rectangle in which both input rectangles are fully enclosed. """ (xMin1, yMin1, xMax1, yMax1) = rect1 (xMin2, yMin2, xMax2, yMax2) = rect2 xMin, yMin, xMax, yMax = ( min(xMin1, xMin2), min(yMin1, yMin2), max(xMax1, xMax2), max(yMax1, yMax2), ) return (xMin, yMin, xMax, yMax) def rectCenter(rect): """Determine rectangle center. Args: rect: Bounding rectangle, expressed as tuples ``(xMin, yMin, xMax, yMax)``. Returns: A 2D tuple representing the point at the center of the rectangle. """ (xMin, yMin, xMax, yMax) = rect return (xMin + xMax) / 2, (yMin + yMax) / 2 def rectArea(rect): """Determine rectangle area. Args: rect: Bounding rectangle, expressed as tuples ``(xMin, yMin, xMax, yMax)``. Returns: The area of the rectangle. """ (xMin, yMin, xMax, yMax) = rect return (yMax - yMin) * (xMax - xMin) def intRect(rect): """Round a rectangle to integer values. Guarantees that the resulting rectangle is NOT smaller than the original. Args: rect: Bounding rectangle, expressed as tuples ``(xMin, yMin, xMax, yMax)``. Returns: A rounded bounding rectangle. """ (xMin, yMin, xMax, yMax) = rect xMin = int(math.floor(xMin)) yMin = int(math.floor(yMin)) xMax = int(math.ceil(xMax)) yMax = int(math.ceil(yMax)) return (xMin, yMin, xMax, yMax) def quantizeRect(rect, factor=1): """ >>> bounds = (72.3, -218.4, 1201.3, 919.1) >>> quantizeRect(bounds) (72, -219, 1202, 920) >>> quantizeRect(bounds, factor=10) (70, -220, 1210, 920) >>> quantizeRect(bounds, factor=100) (0, -300, 1300, 1000) """ if factor < 1: raise ValueError(f"Expected quantization factor >= 1, found: {factor!r}") xMin, yMin, xMax, yMax = normRect(rect) return ( int(math.floor(xMin / factor) * factor), int(math.floor(yMin / factor) * factor), int(math.ceil(xMax / factor) * factor), int(math.ceil(yMax / factor) * factor), ) class Vector(_Vector): def __init__(self, *args, **kwargs): warnings.warn( "fontTools.misc.arrayTools.Vector has been deprecated, please use " "fontTools.misc.vector.Vector instead.", DeprecationWarning, ) def pairwise(iterable, reverse=False): """Iterate over current and next items in iterable. Args: iterable: An iterable reverse: If true, iterate in reverse order. Returns: A iterable yielding two elements per iteration. 
Example: >>> tuple(pairwise([])) () >>> tuple(pairwise([], reverse=True)) () >>> tuple(pairwise([0])) ((0, 0),) >>> tuple(pairwise([0], reverse=True)) ((0, 0),) >>> tuple(pairwise([0, 1])) ((0, 1), (1, 0)) >>> tuple(pairwise([0, 1], reverse=True)) ((1, 0), (0, 1)) >>> tuple(pairwise([0, 1, 2])) ((0, 1), (1, 2), (2, 0)) >>> tuple(pairwise([0, 1, 2], reverse=True)) ((2, 1), (1, 0), (0, 2)) >>> tuple(pairwise(['a', 'b', 'c', 'd'])) (('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')) >>> tuple(pairwise(['a', 'b', 'c', 'd'], reverse=True)) (('d', 'c'), ('c', 'b'), ('b', 'a'), ('a', 'd')) """ if not iterable: return if reverse: it = reversed(iterable) else: it = iter(iterable) first = next(it, None) a = first for b in it: yield (a, b) a = b yield (a, first) def _test(): """ >>> import math >>> calcBounds([]) (0, 0, 0, 0) >>> calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)]) (0, 10, 80, 100) >>> updateBounds((0, 0, 0, 0), (100, 100)) (0, 0, 100, 100) >>> pointInRect((50, 50), (0, 0, 100, 100)) True >>> pointInRect((0, 0), (0, 0, 100, 100)) True >>> pointInRect((100, 100), (0, 0, 100, 100)) True >>> not pointInRect((101, 100), (0, 0, 100, 100)) True >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100))) [True, True, True, False] >>> vectorLength((3, 4)) 5.0 >>> vectorLength((1, 1)) == math.sqrt(2) True >>> list(asInt16([0, 0.1, 0.5, 0.9])) [0, 0, 1, 1] >>> normRect((0, 10, 100, 200)) (0, 10, 100, 200) >>> normRect((100, 200, 0, 10)) (0, 10, 100, 200) >>> scaleRect((10, 20, 50, 150), 1.5, 2) (15.0, 40, 75.0, 300) >>> offsetRect((10, 20, 30, 40), 5, 6) (15, 26, 35, 46) >>> insetRect((10, 20, 50, 60), 5, 10) (15, 30, 45, 50) >>> insetRect((10, 20, 50, 60), -5, -10) (5, 10, 55, 70) >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50)) >>> not intersects True >>> intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50)) >>> intersects True >>> rect (5, 20, 20, 30) >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50)) (0, 10, 20, 50) >>> rectCenter((0, 0, 100, 200)) (50.0, 100.0) >>> rectCenter((0, 0, 100, 199.0)) (50.0, 99.5) >>> intRect((0.9, 2.9, 3.1, 4.1)) (0, 2, 4, 5) """ if __name__ == "__main__": import sys import doctest sys.exit(doctest.testmod().failed)

fontTools/misc/bezierTools.py
# -*- coding: utf-8 -*- """fontTools.misc.bezierTools.py -- tools for working with Bezier path segments.
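By convention, functions whose names end in ``C`` are the (optionally cython-compiled) variants and take curve points as complex numbers, with x in the real part and y in the imaginary part, instead of 2D tuples.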
""" from fontTools.misc.arrayTools import calcBounds, sectRect, rectArea from fontTools.misc.transform import Identity import math from collections import namedtuple try: import cython COMPILED = cython.compiled except (AttributeError, ImportError): # if cython not installed, use mock module with no-op decorators and types from fontTools.misc import cython COMPILED = False Intersection = namedtuple("Intersection", ["pt", "t1", "t2"]) __all__ = [ "approximateCubicArcLength", "approximateCubicArcLengthC", "approximateQuadraticArcLength", "approximateQuadraticArcLengthC", "calcCubicArcLength", "calcCubicArcLengthC", "calcQuadraticArcLength", "calcQuadraticArcLengthC", "calcCubicBounds", "calcQuadraticBounds", "splitLine", "splitQuadratic", "splitCubic", "splitQuadraticAtT", "splitCubicAtT", "splitCubicAtTC", "splitCubicIntoTwoAtTC", "solveQuadratic", "solveCubic", "quadraticPointAtT", "cubicPointAtT", "cubicPointAtTC", "linePointAtT", "segmentPointAtT", "lineLineIntersections", "curveLineIntersections", "curveCurveIntersections", "segmentSegmentIntersections", ] def calcCubicArcLength(pt1, pt2, pt3, pt4, tolerance=0.005): """Calculates the arc length for a cubic Bezier segment. Whereas :func:`approximateCubicArcLength` approximates the length, this function calculates it by "measuring", recursively dividing the curve until the divided segments are shorter than ``tolerance``. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. tolerance: Controls the precision of the calcuation. Returns: Arc length value. """ return calcCubicArcLengthC( complex(*pt1), complex(*pt2), complex(*pt3), complex(*pt4), tolerance ) def _split_cubic_into_two(p0, p1, p2, p3): mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 deriv3 = (p3 + p2 - p1 - p0) * 0.125 return ( (p0, (p0 + p1) * 0.5, mid - deriv3, mid), (mid, mid + deriv3, (p2 + p3) * 0.5, p3), ) @cython.returns(cython.double) @cython.locals( p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, ) @cython.locals(mult=cython.double, arch=cython.double, box=cython.double) def _calcCubicArcLengthCRecurse(mult, p0, p1, p2, p3): arch = abs(p0 - p3) box = abs(p0 - p1) + abs(p1 - p2) + abs(p2 - p3) if arch * mult >= box: return (arch + box) * 0.5 else: one, two = _split_cubic_into_two(p0, p1, p2, p3) return _calcCubicArcLengthCRecurse(mult, *one) + _calcCubicArcLengthCRecurse( mult, *two ) @cython.returns(cython.double) @cython.locals( pt1=cython.complex, pt2=cython.complex, pt3=cython.complex, pt4=cython.complex, ) @cython.locals( tolerance=cython.double, mult=cython.double, ) def calcCubicArcLengthC(pt1, pt2, pt3, pt4, tolerance=0.005): """Calculates the arc length for a cubic Bezier segment. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers. tolerance: Controls the precision of the calcuation. Returns: Arc length value. """ mult = 1.0 + 1.5 * tolerance # The 1.5 is a empirical hack; no math return _calcCubicArcLengthCRecurse(mult, pt1, pt2, pt3, pt4) epsilonDigits = 6 epsilon = 1e-10 @cython.cfunc @cython.inline @cython.returns(cython.double) @cython.locals(v1=cython.complex, v2=cython.complex) def _dot(v1, v2): return (v1 * v2.conjugate()).real @cython.cfunc @cython.inline @cython.returns(cython.double) @cython.locals(x=cython.double) def _intSecAtan(x): # In : sympy.integrate(sp.sec(sp.atan(x))) # Out: x*sqrt(x**2 + 1)/2 + asinh(x)/2 return x * math.sqrt(x**2 + 1) / 2 + math.asinh(x) / 2 def calcQuadraticArcLength(pt1, pt2, pt3): """Calculates the arc length for a quadratic Bezier segment. 
Args: pt1: Start point of the Bezier as 2D tuple. pt2: Handle point of the Bezier as 2D tuple. pt3: End point of the Bezier as 2D tuple. Returns: Arc length value. Example:: >>> calcQuadraticArcLength((0, 0), (0, 0), (0, 0)) # empty segment 0.0 >>> calcQuadraticArcLength((0, 0), (50, 0), (80, 0)) # collinear points 80.0 >>> calcQuadraticArcLength((0, 0), (0, 50), (0, 80)) # collinear points vertical 80.0 >>> calcQuadraticArcLength((0, 0), (50, 20), (100, 40)) # collinear points 107.70329614269008 >>> calcQuadraticArcLength((0, 0), (0, 100), (100, 0)) 154.02976155645263 >>> calcQuadraticArcLength((0, 0), (0, 50), (100, 0)) 120.21581243984076 >>> calcQuadraticArcLength((0, 0), (50, -10), (80, 50)) 102.53273816445825 >>> calcQuadraticArcLength((0, 0), (40, 0), (-40, 0)) # collinear points, control point outside 66.66666666666667 >>> calcQuadraticArcLength((0, 0), (40, 0), (0, 0)) # collinear points, looping back 40.0 """ return calcQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3)) @cython.returns(cython.double) @cython.locals( pt1=cython.complex, pt2=cython.complex, pt3=cython.complex, d0=cython.complex, d1=cython.complex, d=cython.complex, n=cython.complex, ) @cython.locals( scale=cython.double, origDist=cython.double, a=cython.double, b=cython.double, x0=cython.double, x1=cython.double, Len=cython.double, ) def calcQuadraticArcLengthC(pt1, pt2, pt3): """Calculates the arc length for a quadratic Bezier segment. Args: pt1: Start point of the Bezier as a complex number. pt2: Handle point of the Bezier as a complex number. pt3: End point of the Bezier as a complex number. Returns: Arc length value. """ # Analytical solution to the length of a quadratic bezier. # Documentation: https://github.com/fonttools/fonttools/issues/3055 d0 = pt2 - pt1 d1 = pt3 - pt2 d = d1 - d0 n = d * 1j scale = abs(n) if scale == 0.0: return abs(pt3 - pt1) origDist = _dot(n, d0) if abs(origDist) < epsilon: if _dot(d0, d1) >= 0: return abs(pt3 - pt1) a, b = abs(d0), abs(d1) return (a * a + b * b) / (a + b) x0 = _dot(d, d0) / origDist x1 = _dot(d, d1) / origDist Len = abs(2 * (_intSecAtan(x1) - _intSecAtan(x0)) * origDist / (scale * (x1 - x0))) return Len def approximateQuadraticArcLength(pt1, pt2, pt3): """Calculates the arc length for a quadratic Bezier segment. Uses Gauss-Legendre quadrature for a branch-free approximation. See :func:`calcQuadraticArcLength` for a slower but more accurate result. Args: pt1: Start point of the Bezier as 2D tuple. pt2: Handle point of the Bezier as 2D tuple. pt3: End point of the Bezier as 2D tuple. Returns: Approximate arc length value. """ return approximateQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3)) @cython.returns(cython.double) @cython.locals( pt1=cython.complex, pt2=cython.complex, pt3=cython.complex, ) @cython.locals( v0=cython.double, v1=cython.double, v2=cython.double, ) def approximateQuadraticArcLengthC(pt1, pt2, pt3): """Calculates the arc length for a quadratic Bezier segment. Uses Gauss-Legendre quadrature for a branch-free approximation. See :func:`calcQuadraticArcLength` for a slower but more accurate result. Args: pt1: Start point of the Bezier as a complex number. pt2: Handle point of the Bezier as a complex number. pt3: End point of the Bezier as a complex number. Returns: Approximate arc length value. """ # This, essentially, approximates the length-of-derivative function # to be integrated with the best-matching fifth-degree polynomial # approximation of it. 
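# For instance, at the middle node t=1/2 the derivative is B'(1/2) = pt3 - pt1,
# and its Gauss-Legendre weight is 8/18 = 0.4444..., which is exactly the v1
# term below.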
# # https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Legendre_quadrature # abs(BezierCurveC[2].diff(t).subs({t:T})) for T in sorted(.5, .5±sqrt(3/5)/2), # weighted 5/18, 8/18, 5/18 respectively. v0 = abs( -0.492943519233745 * pt1 + 0.430331482911935 * pt2 + 0.0626120363218102 * pt3 ) v1 = abs(pt3 - pt1) * 0.4444444444444444 v2 = abs( -0.0626120363218102 * pt1 - 0.430331482911935 * pt2 + 0.492943519233745 * pt3 ) return v0 + v1 + v2 def calcQuadraticBounds(pt1, pt2, pt3): """Calculates the bounding rectangle for a quadratic Bezier segment. Args: pt1: Start point of the Bezier as a 2D tuple. pt2: Handle point of the Bezier as a 2D tuple. pt3: End point of the Bezier as a 2D tuple. Returns: A four-item tuple representing the bounding rectangle ``(xMin, yMin, xMax, yMax)``. Example:: >>> calcQuadraticBounds((0, 0), (50, 100), (100, 0)) (0, 0, 100, 50.0) >>> calcQuadraticBounds((0, 0), (100, 0), (100, 100)) (0.0, 0.0, 100, 100) """ (ax, ay), (bx, by), (cx, cy) = calcQuadraticParameters(pt1, pt2, pt3) ax2 = ax * 2.0 ay2 = ay * 2.0 roots = [] if ax2 != 0: roots.append(-bx / ax2) if ay2 != 0: roots.append(-by / ay2) points = [ (ax * t * t + bx * t + cx, ay * t * t + by * t + cy) for t in roots if 0 <= t < 1 ] + [pt1, pt3] return calcBounds(points) def approximateCubicArcLength(pt1, pt2, pt3, pt4): """Approximates the arc length for a cubic Bezier segment. Uses Gauss-Lobatto quadrature with n=5 points to approximate arc length. See :func:`calcCubicArcLength` for a slower but more accurate result. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. Returns: Arc length value. Example:: >>> approximateCubicArcLength((0, 0), (25, 100), (75, 100), (100, 0)) 190.04332968932817 >>> approximateCubicArcLength((0, 0), (50, 0), (100, 50), (100, 100)) 154.8852074945903 >>> approximateCubicArcLength((0, 0), (50, 0), (100, 0), (150, 0)) # line; exact result should be 150. 149.99999999999991 >>> approximateCubicArcLength((0, 0), (50, 0), (100, 0), (-50, 0)) # cusp; exact result should be 150. 136.9267662156362 >>> approximateCubicArcLength((0, 0), (50, 0), (100, -50), (-50, 0)) # cusp 154.80848416537057 """ return approximateCubicArcLengthC( complex(*pt1), complex(*pt2), complex(*pt3), complex(*pt4) ) @cython.returns(cython.double) @cython.locals( pt1=cython.complex, pt2=cython.complex, pt3=cython.complex, pt4=cython.complex, ) @cython.locals( v0=cython.double, v1=cython.double, v2=cython.double, v3=cython.double, v4=cython.double, ) def approximateCubicArcLengthC(pt1, pt2, pt3, pt4): """Approximates the arc length for a cubic Bezier segment. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers. Returns: Arc length value. """ # This, essentially, approximates the length-of-derivative function # to be integrated with the best-matching seventh-degree polynomial # approximation of it. # # https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Lobatto_rules # abs(BezierCurveC[3].diff(t).subs({t:T})) for T in sorted(0, .5±(3/7)**.5/2, .5, 1), # weighted 1/20, 49/180, 32/90, 49/180, 1/20 respectively. 
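# The constants below fold those weights into the derivative samples, e.g.
# v0 = (1/20) * |B'(0)| = (1/20) * |3 * (pt2 - pt1)| = 0.15 * |pt2 - pt1|, and
# v2 = (32/90) * |B'(1/2)| = (32/90) * 0.75 * |pt4 + pt3 - pt2 - pt1|
#    = 0.2666... * |pt4 - pt1 + pt3 - pt2|.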
v0 = abs(pt2 - pt1) * 0.15 v1 = abs( -0.558983582205757 * pt1 + 0.325650248872424 * pt2 + 0.208983582205757 * pt3 + 0.024349751127576 * pt4 ) v2 = abs(pt4 - pt1 + pt3 - pt2) * 0.26666666666666666 v3 = abs( -0.024349751127576 * pt1 - 0.208983582205757 * pt2 - 0.325650248872424 * pt3 + 0.558983582205757 * pt4 ) v4 = abs(pt4 - pt3) * 0.15 return v0 + v1 + v2 + v3 + v4 def calcCubicBounds(pt1, pt2, pt3, pt4): """Calculates the bounding rectangle for a cubic Bezier segment. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. Returns: A four-item tuple representing the bounding rectangle ``(xMin, yMin, xMax, yMax)``. Example:: >>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0)) (0, 0, 100, 75.0) >>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100)) (0.0, 0.0, 100, 100) >>> print("%f %f %f %f" % calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))) 35.566243 0.000000 64.433757 75.000000 """ (ax, ay), (bx, by), (cx, cy), (dx, dy) = calcCubicParameters(pt1, pt2, pt3, pt4) # calc first derivative ax3 = ax * 3.0 ay3 = ay * 3.0 bx2 = bx * 2.0 by2 = by * 2.0 xRoots = [t for t in solveQuadratic(ax3, bx2, cx) if 0 <= t < 1] yRoots = [t for t in solveQuadratic(ay3, by2, cy) if 0 <= t < 1] roots = xRoots + yRoots points = [ ( ax * t * t * t + bx * t * t + cx * t + dx, ay * t * t * t + by * t * t + cy * t + dy, ) for t in roots ] + [pt1, pt4] return calcBounds(points) def splitLine(pt1, pt2, where, isHorizontal): """Split a line at a given coordinate. Args: pt1: Start point of line as 2D tuple. pt2: End point of line as 2D tuple. where: Position at which to split the line. isHorizontal: Direction of the ray splitting the line. If true, ``where`` is interpreted as a Y coordinate; if false, then ``where`` is interpreted as an X coordinate. Returns: A list of two line segments (each line segment being two 2D tuples) if the line was successfully split, or a list containing the original line. Example:: >>> printSegments(splitLine((0, 0), (100, 100), 50, True)) ((0, 0), (50, 50)) ((50, 50), (100, 100)) >>> printSegments(splitLine((0, 0), (100, 100), 100, True)) ((0, 0), (100, 100)) >>> printSegments(splitLine((0, 0), (100, 100), 0, True)) ((0, 0), (0, 0)) ((0, 0), (100, 100)) >>> printSegments(splitLine((0, 0), (100, 100), 0, False)) ((0, 0), (0, 0)) ((0, 0), (100, 100)) >>> printSegments(splitLine((100, 0), (0, 0), 50, False)) ((100, 0), (50, 0)) ((50, 0), (0, 0)) >>> printSegments(splitLine((0, 100), (0, 0), 50, True)) ((0, 100), (0, 50)) ((0, 50), (0, 0)) """ pt1x, pt1y = pt1 pt2x, pt2y = pt2 ax = pt2x - pt1x ay = pt2y - pt1y bx = pt1x by = pt1y a = (ax, ay)[isHorizontal] if a == 0: return [(pt1, pt2)] t = (where - (bx, by)[isHorizontal]) / a if 0 <= t < 1: midPt = ax * t + bx, ay * t + by return [(pt1, midPt), (midPt, pt2)] else: return [(pt1, pt2)] def splitQuadratic(pt1, pt2, pt3, where, isHorizontal): """Split a quadratic Bezier curve at a given coordinate. Args: pt1,pt2,pt3: Control points of the Bezier as 2D tuples. where: Position at which to split the curve. isHorizontal: Direction of the ray splitting the curve. If true, ``where`` is interpreted as a Y coordinate; if false, then ``where`` is interpreted as an X coordinate. Returns: A list of two curve segments (each curve segment being three 2D tuples) if the curve was successfully split, or a list containing the original curve.
Example:: >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False)) ((0, 0), (50, 100), (100, 0)) >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False)) ((0, 0), (25, 50), (50, 50)) ((50, 50), (75, 50), (100, 0)) >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False)) ((0, 0), (12.5, 25), (25, 37.5)) ((25, 37.5), (62.5, 75), (100, 0)) >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True)) ((0, 0), (7.32233, 14.6447), (14.6447, 25)) ((14.6447, 25), (50, 75), (85.3553, 25)) ((85.3553, 25), (92.6777, 14.6447), (100, -7.10543e-15)) >>> # XXX I'm not at all sure if the following behavior is desirable: >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True)) ((0, 0), (25, 50), (50, 50)) ((50, 50), (50, 50), (50, 50)) ((50, 50), (75, 50), (100, 0)) """ a, b, c = calcQuadraticParameters(pt1, pt2, pt3) solutions = solveQuadratic( a[isHorizontal], b[isHorizontal], c[isHorizontal] - where ) solutions = sorted(t for t in solutions if 0 <= t < 1) if not solutions: return [(pt1, pt2, pt3)] return _splitQuadraticAtT(a, b, c, *solutions) def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal): """Split a cubic Bezier curve at a given coordinate. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. where: Position at which to split the curve. isHorizontal: Direction of the ray splitting the curve. If true, ``where`` is interpreted as a Y coordinate; if false, then ``where`` is interpreted as an X coordinate. Returns: A list of two curve segments (each curve segment being four 2D tuples) if the curve was successfully split, or a list containing the original curve. Example:: >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False)) ((0, 0), (25, 100), (75, 100), (100, 0)) >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False)) ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True)) ((0, 0), (2.29379, 9.17517), (4.79804, 17.5085), (7.47414, 25)) ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667), (92.5259, 25)) ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517), (100, 1.77636e-15)) """ a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) solutions = solveCubic( a[isHorizontal], b[isHorizontal], c[isHorizontal], d[isHorizontal] - where ) solutions = sorted(t for t in solutions if 0 <= t < 1) if not solutions: return [(pt1, pt2, pt3, pt4)] return _splitCubicAtT(a, b, c, d, *solutions) def splitQuadraticAtT(pt1, pt2, pt3, *ts): """Split a quadratic Bezier curve at one or more values of t. Args: pt1,pt2,pt3: Control points of the Bezier as 2D tuples. *ts: Positions at which to split the curve. Returns: A list of curve segments (each curve segment being three 2D tuples). Examples:: >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5)) ((0, 0), (25, 50), (50, 50)) ((50, 50), (75, 50), (100, 0)) >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75)) ((0, 0), (25, 50), (50, 50)) ((50, 50), (62.5, 50), (75, 37.5)) ((75, 37.5), (87.5, 25), (100, 0)) """ a, b, c = calcQuadraticParameters(pt1, pt2, pt3) return _splitQuadraticAtT(a, b, c, *ts) def splitCubicAtT(pt1, pt2, pt3, pt4, *ts): """Split a cubic Bezier curve at one or more values of t. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. *ts: Positions at which to split the curve. 
Returns: A list of curve segments (each curve segment being four 2D tuples). Examples:: >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5)) ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75)) ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) ((50, 75), (59.375, 75), (68.75, 68.75), (77.3438, 56.25)) ((77.3438, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0)) """ a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) return _splitCubicAtT(a, b, c, d, *ts) @cython.locals( pt1=cython.complex, pt2=cython.complex, pt3=cython.complex, pt4=cython.complex, a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex, ) def splitCubicAtTC(pt1, pt2, pt3, pt4, *ts): """Split a cubic Bezier curve at one or more values of t. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers.. *ts: Positions at which to split the curve. Yields: Curve segments (each curve segment being four complex numbers). """ a, b, c, d = calcCubicParametersC(pt1, pt2, pt3, pt4) yield from _splitCubicAtTC(a, b, c, d, *ts) @cython.returns(cython.complex) @cython.locals( t=cython.double, pt1=cython.complex, pt2=cython.complex, pt3=cython.complex, pt4=cython.complex, pointAtT=cython.complex, off1=cython.complex, off2=cython.complex, ) @cython.locals( t2=cython.double, _1_t=cython.double, _1_t_2=cython.double, _2_t_1_t=cython.double ) def splitCubicIntoTwoAtTC(pt1, pt2, pt3, pt4, t): """Split a cubic Bezier curve at t. Args: pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers. t: Position at which to split the curve. Returns: A tuple of two curve segments (each curve segment being four complex numbers). """ t2 = t * t _1_t = 1 - t _1_t_2 = _1_t * _1_t _2_t_1_t = 2 * t * _1_t pointAtT = ( _1_t_2 * _1_t * pt1 + 3 * (_1_t_2 * t * pt2 + _1_t * t2 * pt3) + t2 * t * pt4 ) off1 = _1_t_2 * pt1 + _2_t_1_t * pt2 + t2 * pt3 off2 = _1_t_2 * pt2 + _2_t_1_t * pt3 + t2 * pt4 pt2 = pt1 + (pt2 - pt1) * t pt3 = pt4 + (pt3 - pt4) * _1_t return ((pt1, pt2, off1, pointAtT), (pointAtT, off2, pt3, pt4)) def _splitQuadraticAtT(a, b, c, *ts): ts = list(ts) segments = [] ts.insert(0, 0.0) ts.append(1.0) ax, ay = a bx, by = b cx, cy = c for i in range(len(ts) - 1): t1 = ts[i] t2 = ts[i + 1] delta = t2 - t1 # calc new a, b and c delta_2 = delta * delta a1x = ax * delta_2 a1y = ay * delta_2 b1x = (2 * ax * t1 + bx) * delta b1y = (2 * ay * t1 + by) * delta t1_2 = t1 * t1 c1x = ax * t1_2 + bx * t1 + cx c1y = ay * t1_2 + by * t1 + cy pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y)) segments.append((pt1, pt2, pt3)) return segments def _splitCubicAtT(a, b, c, d, *ts): ts = list(ts) ts.insert(0, 0.0) ts.append(1.0) segments = [] ax, ay = a bx, by = b cx, cy = c dx, dy = d for i in range(len(ts) - 1): t1 = ts[i] t2 = ts[i + 1] delta = t2 - t1 delta_2 = delta * delta delta_3 = delta * delta_2 t1_2 = t1 * t1 t1_3 = t1 * t1_2 # calc new a, b, c and d a1x = ax * delta_3 a1y = ay * delta_3 b1x = (3 * ax * t1 + bx) * delta_2 b1y = (3 * ay * t1 + by) * delta_2 c1x = (2 * bx * t1 + cx + 3 * ax * t1_2) * delta c1y = (2 * by * t1 + cy + 3 * ay * t1_2) * delta d1x = ax * t1_3 + bx * t1_2 + cx * t1 + dx d1y = ay * t1_3 + by * t1_2 + cy * t1 + dy pt1, pt2, pt3, pt4 = calcCubicPoints( (a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y) ) segments.append((pt1, pt2, pt3, pt4)) return segments @cython.locals( a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex, 
t1=cython.double, t2=cython.double, delta=cython.double, delta_2=cython.double, delta_3=cython.double, a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex, ) def _splitCubicAtTC(a, b, c, d, *ts): ts = list(ts) ts.insert(0, 0.0) ts.append(1.0) for i in range(len(ts) - 1): t1 = ts[i] t2 = ts[i + 1] delta = t2 - t1 delta_2 = delta * delta delta_3 = delta * delta_2 t1_2 = t1 * t1 t1_3 = t1 * t1_2 # calc new a, b, c and d a1 = a * delta_3 b1 = (3 * a * t1 + b) * delta_2 c1 = (2 * b * t1 + c + 3 * a * t1_2) * delta d1 = a * t1_3 + b * t1_2 + c * t1 + d pt1, pt2, pt3, pt4 = calcCubicPointsC(a1, b1, c1, d1) yield (pt1, pt2, pt3, pt4) # # Equation solvers. # from math import sqrt, acos, cos, pi def solveQuadratic(a, b, c, sqrt=sqrt): """Solve a quadratic equation. Solves *a*x*x + b*x + c = 0* where a, b and c are real. Args: a: coefficient of *x²* b: coefficient of *x* c: constant term Returns: A list of roots. Note that the returned list is neither guaranteed to be sorted nor to contain unique values! """ if abs(a) < epsilon: if abs(b) < epsilon: # We have a non-equation; therefore, we have no valid solution roots = [] else: # We have a linear equation with 1 root. roots = [-c / b] else: # We have a true quadratic equation. Apply the quadratic formula to find two roots. DD = b * b - 4.0 * a * c if DD >= 0.0: rDD = sqrt(DD) roots = [(-b + rDD) / 2.0 / a, (-b - rDD) / 2.0 / a] else: # complex roots, ignore roots = [] return roots def solveCubic(a, b, c, d): """Solve a cubic equation. Solves *a*x*x*x + b*x*x + c*x + d = 0* where a, b, c and d are real. Args: a: coefficient of *x³* b: coefficient of *x²* c: coefficient of *x* d: constant term Returns: A list of roots. Note that the returned list is neither guaranteed to be sorted nor to contain unique values! Examples:: >>> solveCubic(1, 1, -6, 0) [-3.0, -0.0, 2.0] >>> solveCubic(-10.0, -9.0, 48.0, -29.0) [-2.9, 1.0, 1.0] >>> solveCubic(-9.875, -9.0, 47.625, -28.75) [-2.911392, 1.0, 1.0] >>> solveCubic(1.0, -4.5, 6.75, -3.375) [1.5, 1.5, 1.5] >>> solveCubic(-12.0, 18.0, -9.0, 1.50023651123) [0.5, 0.5, 0.5] >>> solveCubic( ... 9.0, 0.0, 0.0, -7.62939453125e-05 ... ) == [-0.0, -0.0, -0.0] True """ # # adapted from: # CUBIC.C - Solve a cubic polynomial # public domain by Ross Cottrell # found at: http://www.strangecreations.com/library/snippets/Cubic.C # if abs(a) < epsilon: # don't just test for zero; for very small values of 'a' solveCubic() # returns unreliable results, so we fall back to quad. return solveQuadratic(b, c, d) a = float(a) a1 = b / a a2 = c / a a3 = d / a Q = (a1 * a1 - 3.0 * a2) / 9.0 R = (2.0 * a1 * a1 * a1 - 9.0 * a1 * a2 + 27.0 * a3) / 54.0 R2 = R * R Q3 = Q * Q * Q R2 = 0 if R2 < epsilon else R2 Q3 = 0 if abs(Q3) < epsilon else Q3 R2_Q3 = R2 - Q3 if R2 == 0.0 and Q3 == 0.0: x = round(-a1 / 3.0, epsilonDigits) return [x, x, x] elif R2_Q3 <= epsilon * 0.5: # The epsilon * .5 above ensures that Q3 is not zero. 
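# Three real roots (the trigonometric form of Cardano's method): here
# R*R - Q*Q*Q <= epsilon/2, and the checks above guarantee Q3 > 0, so we
# can write cos(theta) = R / sqrt(Q3) (clamped to [-1, 1] against rounding
# error), and the roots of the cubic are
#   x_k = -2 * sqrt(Q) * cos((theta + 2*pi*k) / 3) - a1 / 3,  k = 0, 1, 2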
theta = acos(max(min(R / sqrt(Q3), 1.0), -1.0)) rQ2 = -2.0 * sqrt(Q) a1_3 = a1 / 3.0 x0 = rQ2 * cos(theta / 3.0) - a1_3 x1 = rQ2 * cos((theta + 2.0 * pi) / 3.0) - a1_3 x2 = rQ2 * cos((theta + 4.0 * pi) / 3.0) - a1_3 x0, x1, x2 = sorted([x0, x1, x2]) # Merge roots that are close-enough if x1 - x0 < epsilon and x2 - x1 < epsilon: x0 = x1 = x2 = round((x0 + x1 + x2) / 3.0, epsilonDigits) elif x1 - x0 < epsilon: x0 = x1 = round((x0 + x1) / 2.0, epsilonDigits) x2 = round(x2, epsilonDigits) elif x2 - x1 < epsilon: x0 = round(x0, epsilonDigits) x1 = x2 = round((x1 + x2) / 2.0, epsilonDigits) else: x0 = round(x0, epsilonDigits) x1 = round(x1, epsilonDigits) x2 = round(x2, epsilonDigits) return [x0, x1, x2] else: x = pow(sqrt(R2_Q3) + abs(R), 1 / 3.0) x = x + Q / x if R >= 0.0: x = -x x = round(x - a1 / 3.0, epsilonDigits) return [x] # # Conversion routines for points to parameters and vice versa # def calcQuadraticParameters(pt1, pt2, pt3): x2, y2 = pt2 x3, y3 = pt3 cx, cy = pt1 bx = (x2 - cx) * 2.0 by = (y2 - cy) * 2.0 ax = x3 - cx - bx ay = y3 - cy - by return (ax, ay), (bx, by), (cx, cy) def calcCubicParameters(pt1, pt2, pt3, pt4): x2, y2 = pt2 x3, y3 = pt3 x4, y4 = pt4 dx, dy = pt1 cx = (x2 - dx) * 3.0 cy = (y2 - dy) * 3.0 bx = (x3 - x2) * 3.0 - cx by = (y3 - y2) * 3.0 - cy ax = x4 - dx - cx - bx ay = y4 - dy - cy - by return (ax, ay), (bx, by), (cx, cy), (dx, dy) @cython.cfunc @cython.inline @cython.locals( pt1=cython.complex, pt2=cython.complex, pt3=cython.complex, pt4=cython.complex, a=cython.complex, b=cython.complex, c=cython.complex, ) def calcCubicParametersC(pt1, pt2, pt3, pt4): c = (pt2 - pt1) * 3.0 b = (pt3 - pt2) * 3.0 - c a = pt4 - pt1 - c - b return (a, b, c, pt1) def calcQuadraticPoints(a, b, c): ax, ay = a bx, by = b cx, cy = c x1 = cx y1 = cy x2 = (bx * 0.5) + cx y2 = (by * 0.5) + cy x3 = ax + bx + cx y3 = ay + by + cy return (x1, y1), (x2, y2), (x3, y3) def calcCubicPoints(a, b, c, d): ax, ay = a bx, by = b cx, cy = c dx, dy = d x1 = dx y1 = dy x2 = (cx / 3.0) + dx y2 = (cy / 3.0) + dy x3 = (bx + cx) / 3.0 + x2 y3 = (by + cy) / 3.0 + y2 x4 = ax + dx + cx + bx y4 = ay + dy + cy + by return (x1, y1), (x2, y2), (x3, y3), (x4, y4) @cython.cfunc @cython.inline @cython.locals( a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex, p2=cython.complex, p3=cython.complex, p4=cython.complex, ) def calcCubicPointsC(a, b, c, d): p2 = c * (1 / 3) + d p3 = (b + c) * (1 / 3) + p2 p4 = a + b + c + d return (d, p2, p3, p4) # # Point at time # def linePointAtT(pt1, pt2, t): """Finds the point at time `t` on a line. Args: pt1, pt2: Coordinates of the line as 2D tuples. t: The time along the line. Returns: A 2D tuple with the coordinates of the point. """ return ((pt1[0] * (1 - t) + pt2[0] * t), (pt1[1] * (1 - t) + pt2[1] * t)) def quadraticPointAtT(pt1, pt2, pt3, t): """Finds the point at time `t` on a quadratic curve. Args: pt1, pt2, pt3: Coordinates of the curve as 2D tuples. t: The time along the curve. Returns: A 2D tuple with the coordinates of the point. """ x = (1 - t) * (1 - t) * pt1[0] + 2 * (1 - t) * t * pt2[0] + t * t * pt3[0] y = (1 - t) * (1 - t) * pt1[1] + 2 * (1 - t) * t * pt2[1] + t * t * pt3[1] return (x, y) def cubicPointAtT(pt1, pt2, pt3, pt4, t): """Finds the point at time `t` on a cubic curve. Args: pt1, pt2, pt3, pt4: Coordinates of the curve as 2D tuples. t: The time along the curve. Returns: A 2D tuple with the coordinates of the point. 
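Example (an illustrative doctest; the value agrees with the midpoint
produced by :func:`splitCubicAtT` above)::

    >>> cubicPointAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5)
    (50.0, 75.0)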
""" t2 = t * t _1_t = 1 - t _1_t_2 = _1_t * _1_t x = ( _1_t_2 * _1_t * pt1[0] + 3 * (_1_t_2 * t * pt2[0] + _1_t * t2 * pt3[0]) + t2 * t * pt4[0] ) y = ( _1_t_2 * _1_t * pt1[1] + 3 * (_1_t_2 * t * pt2[1] + _1_t * t2 * pt3[1]) + t2 * t * pt4[1] ) return (x, y) @cython.returns(cython.complex) @cython.locals( t=cython.double, pt1=cython.complex, pt2=cython.complex, pt3=cython.complex, pt4=cython.complex, ) @cython.locals(t2=cython.double, _1_t=cython.double, _1_t_2=cython.double) def cubicPointAtTC(pt1, pt2, pt3, pt4, t): """Finds the point at time `t` on a cubic curve. Args: pt1, pt2, pt3, pt4: Coordinates of the curve as complex numbers. t: The time along the curve. Returns: A complex number with the coordinates of the point. """ t2 = t * t _1_t = 1 - t _1_t_2 = _1_t * _1_t return _1_t_2 * _1_t * pt1 + 3 * (_1_t_2 * t * pt2 + _1_t * t2 * pt3) + t2 * t * pt4 def segmentPointAtT(seg, t): if len(seg) == 2: return linePointAtT(*seg, t) elif len(seg) == 3: return quadraticPointAtT(*seg, t) elif len(seg) == 4: return cubicPointAtT(*seg, t) raise ValueError("Unknown curve degree") # # Intersection finders # def _line_t_of_pt(s, e, pt): sx, sy = s ex, ey = e px, py = pt if abs(sx - ex) < epsilon and abs(sy - ey) < epsilon: # Line is a point! return -1 # Use the largest if abs(sx - ex) > abs(sy - ey): return (px - sx) / (ex - sx) else: return (py - sy) / (ey - sy) def _both_points_are_on_same_side_of_origin(a, b, origin): xDiff = (a[0] - origin[0]) * (b[0] - origin[0]) yDiff = (a[1] - origin[1]) * (b[1] - origin[1]) return not (xDiff <= 0.0 and yDiff <= 0.0) def lineLineIntersections(s1, e1, s2, e2): """Finds intersections between two line segments. Args: s1, e1: Coordinates of the first line as 2D tuples. s2, e2: Coordinates of the second line as 2D tuples. Returns: A list of ``Intersection`` objects, each object having ``pt``, ``t1`` and ``t2`` attributes containing the intersection point, time on first segment and time on second segment respectively. 
Examples:: >>> a = lineLineIntersections( (310,389), (453, 222), (289, 251), (447, 367)) >>> len(a) 1 >>> intersection = a[0] >>> intersection.pt (374.44882952482897, 313.73458370177315) >>> (intersection.t1, intersection.t2) (0.45069111555824465, 0.5408153767394238) """ s1x, s1y = s1 e1x, e1y = e1 s2x, s2y = s2 e2x, e2y = e2 if ( math.isclose(s2x, e2x) and math.isclose(s1x, e1x) and not math.isclose(s1x, s2x) ): # Parallel vertical return [] if ( math.isclose(s2y, e2y) and math.isclose(s1y, e1y) and not math.isclose(s1y, s2y) ): # Parallel horizontal return [] if math.isclose(s2x, e2x) and math.isclose(s2y, e2y): # Line segment is tiny return [] if math.isclose(s1x, e1x) and math.isclose(s1y, e1y): # Line segment is tiny return [] if math.isclose(e1x, s1x): x = s1x slope34 = (e2y - s2y) / (e2x - s2x) y = slope34 * (x - s2x) + s2y pt = (x, y) return [ Intersection( pt=pt, t1=_line_t_of_pt(s1, e1, pt), t2=_line_t_of_pt(s2, e2, pt) ) ] if math.isclose(s2x, e2x): x = s2x slope12 = (e1y - s1y) / (e1x - s1x) y = slope12 * (x - s1x) + s1y pt = (x, y) return [ Intersection( pt=pt, t1=_line_t_of_pt(s1, e1, pt), t2=_line_t_of_pt(s2, e2, pt) ) ] slope12 = (e1y - s1y) / (e1x - s1x) slope34 = (e2y - s2y) / (e2x - s2x) if math.isclose(slope12, slope34): return [] x = (slope12 * s1x - s1y - slope34 * s2x + s2y) / (slope12 - slope34) y = slope12 * (x - s1x) + s1y pt = (x, y) if _both_points_are_on_same_side_of_origin( pt, e1, s1 ) and _both_points_are_on_same_side_of_origin(pt, s2, e2): return [ Intersection( pt=pt, t1=_line_t_of_pt(s1, e1, pt), t2=_line_t_of_pt(s2, e2, pt) ) ] return [] def _alignment_transformation(segment): # Returns a transformation which aligns a segment horizontally at the # origin. Apply this transformation to curves and root-find to find # intersections with the segment. start = segment[0] end = segment[-1] angle = math.atan2(end[1] - start[1], end[0] - start[0]) return Identity.rotate(-angle).translate(-start[0], -start[1]) def _curve_line_intersections_t(curve, line): aligned_curve = _alignment_transformation(line).transformPoints(curve) if len(curve) == 3: a, b, c = calcQuadraticParameters(*aligned_curve) intersections = solveQuadratic(a[1], b[1], c[1]) elif len(curve) == 4: a, b, c, d = calcCubicParameters(*aligned_curve) intersections = solveCubic(a[1], b[1], c[1], d[1]) else: raise ValueError("Unknown curve degree") return sorted(i for i in intersections if 0.0 <= i <= 1) def curveLineIntersections(curve, line): """Finds intersections between a curve and a line. Args: curve: List of coordinates of the curve segment as 2D tuples. line: List of coordinates of the line segment as 2D tuples. Returns: A list of ``Intersection`` objects, each object having ``pt``, ``t1`` and ``t2`` attributes containing the intersection point, time on first segment and time on second segment respectively. 
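A line that misses the curve entirely yields an empty list; an
illustrative doctest::

    >>> curve = [(0, 0), (25, 100), (75, 100), (100, 0)]
    >>> curveLineIntersections(curve, [(0, 200), (100, 200)])
    []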
Examples:: >>> curve = [ (100, 240), (30, 60), (210, 230), (160, 30) ] >>> line = [ (25, 260), (230, 20) ] >>> intersections = curveLineIntersections(curve, line) >>> len(intersections) 3 >>> intersections[0].pt (84.9000930760723, 189.87306176459828) """ if len(curve) == 3: pointFinder = quadraticPointAtT elif len(curve) == 4: pointFinder = cubicPointAtT else: raise ValueError("Unknown curve degree") intersections = [] for t in _curve_line_intersections_t(curve, line): pt = pointFinder(*curve, t) # Back-project the point onto the line, to avoid problems with # numerical accuracy in the case of vertical and horizontal lines line_t = _line_t_of_pt(*line, pt) pt = linePointAtT(*line, line_t) intersections.append(Intersection(pt=pt, t1=t, t2=line_t)) return intersections def _curve_bounds(c): if len(c) == 3: return calcQuadraticBounds(*c) elif len(c) == 4: return calcCubicBounds(*c) raise ValueError("Unknown curve degree") def _split_segment_at_t(c, t): if len(c) == 2: s, e = c midpoint = linePointAtT(s, e, t) return [(s, midpoint), (midpoint, e)] if len(c) == 3: return splitQuadraticAtT(*c, t) elif len(c) == 4: return splitCubicAtT(*c, t) raise ValueError("Unknown curve degree") def _curve_curve_intersections_t( curve1, curve2, precision=1e-3, range1=None, range2=None ): bounds1 = _curve_bounds(curve1) bounds2 = _curve_bounds(curve2) if not range1: range1 = (0.0, 1.0) if not range2: range2 = (0.0, 1.0) # If bounds don't intersect, go home intersects, _ = sectRect(bounds1, bounds2) if not intersects: return [] def midpoint(r): return 0.5 * (r[0] + r[1]) # If they do overlap but they're tiny, approximate if rectArea(bounds1) < precision and rectArea(bounds2) < precision: return [(midpoint(range1), midpoint(range2))] c11, c12 = _split_segment_at_t(curve1, 0.5) c11_range = (range1[0], midpoint(range1)) c12_range = (midpoint(range1), range1[1]) c21, c22 = _split_segment_at_t(curve2, 0.5) c21_range = (range2[0], midpoint(range2)) c22_range = (midpoint(range2), range2[1]) found = [] found.extend( _curve_curve_intersections_t( c11, c21, precision, range1=c11_range, range2=c21_range ) ) found.extend( _curve_curve_intersections_t( c12, c21, precision, range1=c12_range, range2=c21_range ) ) found.extend( _curve_curve_intersections_t( c11, c22, precision, range1=c11_range, range2=c22_range ) ) found.extend( _curve_curve_intersections_t( c12, c22, precision, range1=c12_range, range2=c22_range ) ) unique_key = lambda ts: (int(ts[0] / precision), int(ts[1] / precision)) seen = set() unique_values = [] for ts in found: key = unique_key(ts) if key in seen: continue seen.add(key) unique_values.append(ts) return unique_values def _is_linelike(segment): maybeline = _alignment_transformation(segment).transformPoints(segment) return all(math.isclose(p[1], 0.0) for p in maybeline) def curveCurveIntersections(curve1, curve2): """Finds intersections between a curve and a curve. Args: curve1: List of coordinates of the first curve segment as 2D tuples. curve2: List of coordinates of the second curve segment as 2D tuples. Returns: A list of ``Intersection`` objects, each object having ``pt``, ``t1`` and ``t2`` attributes containing the intersection point, time on first segment and time on second segment respectively. 
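Curve segments that are really straight lines are detected up front and
delegated to the line intersection routines; an illustrative doctest
using two degenerate (line-like) cubics::

    >>> c1 = [(0.0, 50.0), (25.0, 50.0), (75.0, 50.0), (100.0, 50.0)]
    >>> c2 = [(50.0, 0.0), (50.0, 25.0), (50.0, 75.0), (50.0, 100.0)]
    >>> curveCurveIntersections(c1, c2)[0].pt
    (50.0, 50.0)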
Examples:: >>> curve1 = [ (10,100), (90,30), (40,140), (220,220) ] >>> curve2 = [ (5,150), (180,20), (80,250), (210,190) ] >>> intersections = curveCurveIntersections(curve1, curve2) >>> len(intersections) 3 >>> intersections[0].pt (81.7831487395506, 109.88904552375288) """ if _is_linelike(curve1): line1 = curve1[0], curve1[-1] if _is_linelike(curve2): line2 = curve2[0], curve2[-1] return lineLineIntersections(*line1, *line2) else: return curveLineIntersections(curve2, line1) elif _is_linelike(curve2): line2 = curve2[0], curve2[-1] return curveLineIntersections(curve1, line2) intersection_ts = _curve_curve_intersections_t(curve1, curve2) return [ Intersection(pt=segmentPointAtT(curve1, ts[0]), t1=ts[0], t2=ts[1]) for ts in intersection_ts ] def segmentSegmentIntersections(seg1, seg2): """Finds intersections between two segments. Args: seg1: List of coordinates of the first segment as 2D tuples. seg2: List of coordinates of the second segment as 2D tuples. Returns: A list of ``Intersection`` objects, each object having ``pt``, ``t1`` and ``t2`` attributes containing the intersection point, time on first segment and time on second segment respectively. Examples:: >>> curve1 = [ (10,100), (90,30), (40,140), (220,220) ] >>> curve2 = [ (5,150), (180,20), (80,250), (210,190) ] >>> intersections = segmentSegmentIntersections(curve1, curve2) >>> len(intersections) 3 >>> intersections[0].pt (81.7831487395506, 109.88904552375288) >>> curve3 = [ (100, 240), (30, 60), (210, 230), (160, 30) ] >>> line = [ (25, 260), (230, 20) ] >>> intersections = segmentSegmentIntersections(curve3, line) >>> len(intersections) 3 >>> intersections[0].pt (84.9000930760723, 189.87306176459828) """ # Arrange by degree swapped = False if len(seg2) > len(seg1): seg2, seg1 = seg1, seg2 swapped = True if len(seg1) > 2: if len(seg2) > 2: intersections = curveCurveIntersections(seg1, seg2) else: intersections = curveLineIntersections(seg1, seg2) elif len(seg1) == 2 and len(seg2) == 2: intersections = lineLineIntersections(*seg1, *seg2) else: raise ValueError("Couldn't work out which intersection function to use") if not swapped: return intersections return [Intersection(pt=i.pt, t1=i.t2, t2=i.t1) for i in intersections] def _segmentrepr(obj): """ >>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], [0.1, 2.2]]]]) '(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))' """ try: it = iter(obj) except TypeError: return "%g" % obj else: return "(%s)" % ", ".join(_segmentrepr(x) for x in it) def printSegments(segments): """Helper for the doctests, displaying each segment in a list of segments on a single line as a tuple. """ for segment in segments: print(_segmentrepr(segment)) if __name__ == "__main__": import sys import doctest sys.exit(doctest.testmod().failed) PKaZZZ���0��fontTools/misc/classifyTools.py""" fontTools.misc.classifyTools.py -- tools for classifying things. """ class Classifier(object): """ Main Classifier object, used to classify things into similar sets. """ def __init__(self, sort=True): self._things = set() # set of all things known so far self._sets = [] # list of class sets produced so far self._mapping = {} # map from things to their class set self._dirty = False self._sort = sort def add(self, set_of_things): """ Add a set to the classifier. Any iterable is accepted. 
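For example, two overlapping input sets are split into three classes
(an illustrative doctest)::

    >>> classifier = Classifier()
    >>> classifier.add([1, 2, 3])
    >>> classifier.add([2, 3, 4])
    >>> classifier.getClasses()
    [{2, 3}, {1}, {4}]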
""" if not set_of_things: return self._dirty = True things, sets, mapping = self._things, self._sets, self._mapping s = set(set_of_things) intersection = s.intersection(things) # existing things s.difference_update(intersection) # new things difference = s del s # Add new class for new things if difference: things.update(difference) sets.append(difference) for thing in difference: mapping[thing] = difference del difference while intersection: # Take one item and process the old class it belongs to old_class = mapping[next(iter(intersection))] old_class_intersection = old_class.intersection(intersection) # Update old class to remove items from new set old_class.difference_update(old_class_intersection) # Remove processed items from todo list intersection.difference_update(old_class_intersection) # Add new class for the intersection with old class sets.append(old_class_intersection) for thing in old_class_intersection: mapping[thing] = old_class_intersection del old_class_intersection def update(self, list_of_sets): """ Add a a list of sets to the classifier. Any iterable of iterables is accepted. """ for s in list_of_sets: self.add(s) def _process(self): if not self._dirty: return # Do any deferred processing sets = self._sets self._sets = [s for s in sets if s] if self._sort: self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s))) self._dirty = False # Output methods def getThings(self): """Returns the set of all things known so far. The return value belongs to the Classifier object and should NOT be modified while the classifier is still in use. """ self._process() return self._things def getMapping(self): """Returns the mapping from things to their class set. The return value belongs to the Classifier object and should NOT be modified while the classifier is still in use. """ self._process() return self._mapping def getClasses(self): """Returns the list of class sets. The return value belongs to the Classifier object and should NOT be modified while the classifier is still in use. """ self._process() return self._sets def classify(list_of_sets, sort=True): """ Takes a iterable of iterables (list of sets from here on; but any iterable works.), and returns the smallest list of sets such that each set, is either a subset, or is disjoint from, each of the input sets. In other words, this function classifies all the things present in any of the input sets, into similar classes, based on which sets things are a member of. If sort=True, return class sets are sorted by decreasing size and their natural sort order within each class size. Otherwise, class sets are returned in the order that they were identified, which is generally not significant. >>> classify([]) == ([], {}) True >>> classify([[]]) == ([], {}) True >>> classify([[], []]) == ([], {}) True >>> classify([[1]]) == ([{1}], {1: {1}}) True >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}}) True >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) True >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) True >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}}) True >>> classify([[1,2],[2,4,5]]) == ( ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) True >>> classify([[1,2],[2,4,5]], sort=False) == ( ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) True >>> classify([[1,2,9],[2,4,5]], sort=False) == ( ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, ... 9: {1, 9}}) True >>> classify([[1,2,9,15],[2,4,5]], sort=False) == ( ... 
[{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}}) True >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False) >>> set([frozenset(c) for c in classes]) == set( ... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})]) True >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}} True """ classifier = Classifier(sort=sort) classifier.update(list_of_sets) return classifier.getClasses(), classifier.getMapping() if __name__ == "__main__": import sys, doctest sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed) PKaZZZ�/dFFfontTools/misc/cliTools.py"""Collection of utilities for command-line interfaces and console scripts.""" import os import re numberAddedRE = re.compile(r"#\d+$") def makeOutputFileName( input, outputDir=None, extension=None, overWrite=False, suffix="" ): """Generates a suitable file name for writing output. Often tools will want to take a file, do some kind of transformation to it, and write it out again. This function determines an appropriate name for the output file, through one or more of the following steps: - changing the output directory - appending suffix before file extension - replacing the file extension - suffixing the filename with a number (``#1``, ``#2``, etc.) to avoid overwriting an existing file. Args: input: Name of input file. outputDir: Optionally, a new directory to write the file into. suffix: Optionally, a string suffix is appended to file name before the extension. extension: Optionally, a replacement for the current file extension. overWrite: Overwriting an existing file is permitted if true; if false and the proposed filename exists, a new name will be generated by adding an appropriate number suffix. Returns: str: Suitable output filename """ dirName, fileName = os.path.split(input) fileName, ext = os.path.splitext(fileName) if outputDir: dirName = outputDir fileName = numberAddedRE.split(fileName)[0] if extension is None: extension = os.path.splitext(input)[1] output = os.path.join(dirName, fileName + suffix + extension) n = 1 if not overWrite: while os.path.exists(output): output = os.path.join( dirName, fileName + suffix + "#" + repr(n) + extension ) n += 1 return output PKaZZZ!V���+�+fontTools/misc/configTools.py""" Code of the config system; not related to fontTools or fonts in particular. The options that are specific to fontTools are in :mod:`fontTools.config`. To create your own config system, you need to create an instance of :class:`Options`, and a subclass of :class:`AbstractConfig` with its ``options`` class variable set to your instance of Options. """ from __future__ import annotations import logging from dataclasses import dataclass from typing import ( Any, Callable, ClassVar, Dict, Iterable, Mapping, MutableMapping, Optional, Set, Union, ) log = logging.getLogger(__name__) __all__ = [ "AbstractConfig", "ConfigAlreadyRegisteredError", "ConfigError", "ConfigUnknownOptionError", "ConfigValueParsingError", "ConfigValueValidationError", "Option", "Options", ] class ConfigError(Exception): """Base exception for the config module.""" class ConfigAlreadyRegisteredError(ConfigError): """Raised when a module tries to register a configuration option that already exists. Should not be raised too much really, only when developing new fontTools modules. 
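A sketch of how this error can be triggered (the option name here is
made up for illustration)::

    options = Options()
    options.register("demoLib:OPTION", "help text", 0, int)
    # registering the same name again raises ConfigAlreadyRegisteredError
    options.register("demoLib:OPTION", "help text", 0, int)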
""" def __init__(self, name): super().__init__(f"Config option {name} is already registered.") class ConfigValueParsingError(ConfigError): """Raised when a configuration value cannot be parsed.""" def __init__(self, name, value): super().__init__( f"Config option {name}: value cannot be parsed (given {repr(value)})" ) class ConfigValueValidationError(ConfigError): """Raised when a configuration value cannot be validated.""" def __init__(self, name, value): super().__init__( f"Config option {name}: value is invalid (given {repr(value)})" ) class ConfigUnknownOptionError(ConfigError): """Raised when a configuration option is unknown.""" def __init__(self, option_or_name): name = ( f"'{option_or_name.name}' (id={id(option_or_name)})>" if isinstance(option_or_name, Option) else f"'{option_or_name}'" ) super().__init__(f"Config option {name} is unknown") # eq=False because Options are unique, not fungible objects @dataclass(frozen=True, eq=False) class Option: name: str """Unique name identifying the option (e.g. package.module:MY_OPTION).""" help: str """Help text for this option.""" default: Any """Default value for this option.""" parse: Callable[[str], Any] """Turn input (e.g. string) into proper type. Only when reading from file.""" validate: Optional[Callable[[Any], bool]] = None """Return true if the given value is an acceptable value.""" @staticmethod def parse_optional_bool(v: str) -> Optional[bool]: s = str(v).lower() if s in {"0", "no", "false"}: return False if s in {"1", "yes", "true"}: return True if s in {"auto", "none"}: return None raise ValueError("invalid optional bool: {v!r}") @staticmethod def validate_optional_bool(v: Any) -> bool: return v is None or isinstance(v, bool) class Options(Mapping): """Registry of available options for a given config system. Define new options using the :meth:`register()` method. Access existing options using the Mapping interface. """ __options: Dict[str, Option] def __init__(self, other: "Options" = None) -> None: self.__options = {} if other is not None: for option in other.values(): self.register_option(option) def register( self, name: str, help: str, default: Any, parse: Callable[[str], Any], validate: Optional[Callable[[Any], bool]] = None, ) -> Option: """Create and register a new option.""" return self.register_option(Option(name, help, default, parse, validate)) def register_option(self, option: Option) -> Option: """Register a new option.""" name = option.name if name in self.__options: raise ConfigAlreadyRegisteredError(name) self.__options[name] = option return option def is_registered(self, option: Option) -> bool: """Return True if the same option object is already registered.""" return self.__options.get(option.name) is option def __getitem__(self, key: str) -> Option: return self.__options.__getitem__(key) def __iter__(self) -> Iterator[str]: return self.__options.__iter__() def __len__(self) -> int: return self.__options.__len__() def __repr__(self) -> str: return ( f"{self.__class__.__name__}({{\n" + "".join( f" {k!r}: Option(default={v.default!r}, ...),\n" for k, v in self.__options.items() ) + "})" ) _USE_GLOBAL_DEFAULT = object() class AbstractConfig(MutableMapping): """ Create a set of config values, optionally pre-filled with values from the given dictionary or pre-existing config object. The class implements the MutableMapping protocol keyed by option name (`str`). For convenience its methods accept either Option or str as the key parameter. .. 
seealso:: :meth:`set()` This config class is abstract because it needs its ``options`` class var to be set to an instance of :class:`Options` before it can be instanciated and used. .. code:: python class MyConfig(AbstractConfig): options = Options() MyConfig.register_option( "test:option_name", "This is an option", 0, int, lambda v: isinstance(v, int)) cfg = MyConfig({"test:option_name": 10}) """ options: ClassVar[Options] @classmethod def register_option( cls, name: str, help: str, default: Any, parse: Callable[[str], Any], validate: Optional[Callable[[Any], bool]] = None, ) -> Option: """Register an available option in this config system.""" return cls.options.register( name, help=help, default=default, parse=parse, validate=validate ) _values: Dict[str, Any] def __init__( self, values: Union[AbstractConfig, Dict[Union[Option, str], Any]] = {}, parse_values: bool = False, skip_unknown: bool = False, ): self._values = {} values_dict = values._values if isinstance(values, AbstractConfig) else values for name, value in values_dict.items(): self.set(name, value, parse_values, skip_unknown) def _resolve_option(self, option_or_name: Union[Option, str]) -> Option: if isinstance(option_or_name, Option): option = option_or_name if not self.options.is_registered(option): raise ConfigUnknownOptionError(option) return option elif isinstance(option_or_name, str): name = option_or_name try: return self.options[name] except KeyError: raise ConfigUnknownOptionError(name) else: raise TypeError( "expected Option or str, found " f"{type(option_or_name).__name__}: {option_or_name!r}" ) def set( self, option_or_name: Union[Option, str], value: Any, parse_values: bool = False, skip_unknown: bool = False, ): """Set the value of an option. Args: * `option_or_name`: an `Option` object or its name (`str`). * `value`: the value to be assigned to given option. * `parse_values`: parse the configuration value from a string into its proper type, as per its `Option` object. The default behavior is to raise `ConfigValueValidationError` when the value is not of the right type. Useful when reading options from a file type that doesn't support as many types as Python. * `skip_unknown`: skip unknown configuration options. The default behaviour is to raise `ConfigUnknownOptionError`. Useful when reading options from a configuration file that has extra entries (e.g. for a later version of fontTools) """ try: option = self._resolve_option(option_or_name) except ConfigUnknownOptionError as e: if skip_unknown: log.debug(str(e)) return raise # Can be useful if the values come from a source that doesn't have # strict typing (.ini file? Terminal input?) if parse_values: try: value = option.parse(value) except Exception as e: raise ConfigValueParsingError(option.name, value) from e if option.validate is not None and not option.validate(value): raise ConfigValueValidationError(option.name, value) self._values[option.name] = value def get( self, option_or_name: Union[Option, str], default: Any = _USE_GLOBAL_DEFAULT ) -> Any: """ Get the value of an option. The value which is returned is the first provided among: 1. a user-provided value in the options's ``self._values`` dict 2. a caller-provided default value to this method call 3. the global default for the option provided in ``fontTools.config`` This is to provide the ability to migrate progressively from config options passed as arguments to fontTools APIs to config options read from the current TTFont, e.g. .. 
code:: python def fontToolsAPI(font, some_option): value = font.cfg.get("someLib.module:SOME_OPTION", some_option) # use value That way, the function will work the same for users of the API that still pass the option to the function call, but will favour the new config mechanism if the given font specifies a value for that option. """ option = self._resolve_option(option_or_name) if option.name in self._values: return self._values[option.name] if default is not _USE_GLOBAL_DEFAULT: return default return option.default def copy(self): return self.__class__(self._values) def __getitem__(self, option_or_name: Union[Option, str]) -> Any: return self.get(option_or_name) def __setitem__(self, option_or_name: Union[Option, str], value: Any) -> None: return self.set(option_or_name, value) def __delitem__(self, option_or_name: Union[Option, str]) -> None: option = self._resolve_option(option_or_name) del self._values[option.name] def __iter__(self) -> Iterable[str]: return self._values.__iter__() def __len__(self) -> int: return len(self._values) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self._values)})" PKaZZZ��bت�fontTools/misc/cython.py""" Exports a no-op 'cython' namespace similar to https://github.com/cython/cython/blob/master/Cython/Shadow.py This allows to optionally compile @cython decorated functions (when cython is available at built time), or run the same code as pure-python, without runtime dependency on cython module. We only define the symbols that we use. E.g. see fontTools.cu2qu """ from types import SimpleNamespace def _empty_decorator(x): return x compiled = False for name in ("double", "complex", "int"): globals()[name] = None for name in ("cfunc", "inline"): globals()[name] = _empty_decorator locals = lambda **_: _empty_decorator returns = lambda _: _empty_decorator PKaZZZBu�q q fontTools/misc/dictTools.py"""Misc dict tools.""" __all__ = ["hashdict"] # https://stackoverflow.com/questions/1151658/python-hashable-dicts class hashdict(dict): """ hashable dict implementation, suitable for use as a key into other dicts. >>> h1 = hashdict({"apples": 1, "bananas":2}) >>> h2 = hashdict({"bananas": 3, "mangoes": 5}) >>> h1+h2 hashdict(apples=1, bananas=3, mangoes=5) >>> d1 = {} >>> d1[h1] = "salad" >>> d1[h1] 'salad' >>> d1[h2] Traceback (most recent call last): ... 
KeyError: hashdict(bananas=3, mangoes=5) based on answers from http://stackoverflow.com/questions/1151658/python-hashable-dicts """ def __key(self): return tuple(sorted(self.items())) def __repr__(self): return "{0}({1})".format( self.__class__.__name__, ", ".join("{0}={1}".format(str(i[0]), repr(i[1])) for i in self.__key()), ) def __hash__(self): return hash(self.__key()) def __setitem__(self, key, value): raise TypeError( "{0} does not support item assignment".format(self.__class__.__name__) ) def __delitem__(self, key): raise TypeError( "{0} does not support item assignment".format(self.__class__.__name__) ) def clear(self): raise TypeError( "{0} does not support item assignment".format(self.__class__.__name__) ) def pop(self, *args, **kwargs): raise TypeError( "{0} does not support item assignment".format(self.__class__.__name__) ) def popitem(self, *args, **kwargs): raise TypeError( "{0} does not support item assignment".format(self.__class__.__name__) ) def setdefault(self, *args, **kwargs): raise TypeError( "{0} does not support item assignment".format(self.__class__.__name__) ) def update(self, *args, **kwargs): raise TypeError( "{0} does not support item assignment".format(self.__class__.__name__) ) # update is not ok because it mutates the object # __add__ is ok because it creates a new object # while the new object is under construction, it's ok to mutate it def __add__(self, right): result = hashdict(self) dict.update(result, right) return result PKaZZZ?�f�  fontTools/misc/eexec.py""" PostScript Type 1 fonts make use of two types of encryption: charstring encryption and ``eexec`` encryption. Charstring encryption is used for the charstrings themselves, while ``eexec`` is used to encrypt larger sections of the font program, such as the ``Private`` and ``CharStrings`` dictionaries. Despite the different names, the algorithm is the same, although ``eexec`` encryption uses a fixed initial key R=55665. The algorithm uses cipher feedback, meaning that the ciphertext is used to modify the key. Because of this, the routines in this module return the new key at the end of the operation. """ from fontTools.misc.textTools import bytechr, bytesjoin, byteord def _decryptChar(cipher, R): cipher = byteord(cipher) plain = ((cipher ^ (R >> 8))) & 0xFF R = ((cipher + R) * 52845 + 22719) & 0xFFFF return bytechr(plain), R def _encryptChar(plain, R): plain = byteord(plain) cipher = ((plain ^ (R >> 8))) & 0xFF R = ((cipher + R) * 52845 + 22719) & 0xFFFF return bytechr(cipher), R def decrypt(cipherstring, R): r""" Decrypts a string using the Type 1 encryption algorithm. Args: cipherstring: String of ciphertext. R: Initial key. Returns: decryptedStr: Plaintext string. R: Output key for subsequent decryptions. Examples:: >>> testStr = b"\0\0asdadads asds\265" >>> decryptedStr, R = decrypt(testStr, 12321) >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' True >>> R == 36142 True """ plainList = [] for cipher in cipherstring: plain, R = _decryptChar(cipher, R) plainList.append(plain) plainstring = bytesjoin(plainList) return plainstring, int(R) def encrypt(plainstring, R): r""" Encrypts a string using the Type 1 encryption algorithm. Note that the algorithm as described in the Type 1 specification requires the plaintext to be prefixed with a number of random bytes. (For ``eexec`` the number of random bytes is set to 4.) This routine does *not* add the random prefix to its input. Args: plainstring: String of plaintext. R: Initial key. Returns: cipherstring: Ciphertext string. 
R: Output key for subsequent encryptions. Examples:: >>> testStr = b"\0\0asdadads asds\265" >>> decryptedStr, R = decrypt(testStr, 12321) >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' True >>> R == 36142 True >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' >>> encryptedStr, R = encrypt(testStr, 12321) >>> encryptedStr == b"\0\0asdadads asds\265" True >>> R == 36142 True """ cipherList = [] for plain in plainstring: cipher, R = _encryptChar(plain, R) cipherList.append(cipher) cipherstring = bytesjoin(cipherList) return cipherstring, int(R) def hexString(s): import binascii return binascii.hexlify(s) def deHexString(h): import binascii h = bytesjoin(h.split()) return binascii.unhexlify(h) if __name__ == "__main__": import sys import doctest sys.exit(doctest.testmod().failed) PKaZZZ��?fontTools/misc/encodingTools.py"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings. """ import fontTools.encodings.codecs # Map keyed by platformID, then platEncID, then possibly langID _encodingMap = { 0: { # Unicode 0: "utf_16_be", 1: "utf_16_be", 2: "utf_16_be", 3: "utf_16_be", 4: "utf_16_be", 5: "utf_16_be", 6: "utf_16_be", }, 1: { # Macintosh # See # https://github.com/fonttools/fonttools/issues/236 0: { # Macintosh, platEncID==0, keyed by langID 15: "mac_iceland", 17: "mac_turkish", 18: "mac_croatian", 24: "mac_latin2", 25: "mac_latin2", 26: "mac_latin2", 27: "mac_latin2", 28: "mac_latin2", 36: "mac_latin2", 37: "mac_romanian", 38: "mac_latin2", 39: "mac_latin2", 40: "mac_latin2", Ellipsis: "mac_roman", # Other }, 1: "x_mac_japanese_ttx", 2: "x_mac_trad_chinese_ttx", 3: "x_mac_korean_ttx", 6: "mac_greek", 7: "mac_cyrillic", 25: "x_mac_simp_chinese_ttx", 29: "mac_latin2", 35: "mac_turkish", 37: "mac_iceland", }, 2: { # ISO 0: "ascii", 1: "utf_16_be", 2: "latin1", }, 3: { # Microsoft 0: "utf_16_be", 1: "utf_16_be", 2: "shift_jis", 3: "gb2312", 4: "big5", 5: "euc_kr", 6: "johab", 10: "utf_16_be", }, } def getEncoding(platformID, platEncID, langID, default=None): """Returns the Python encoding name for OpenType platformID/encodingID/langID triplet. If encoding for these values is not known, by default None is returned. That can be overriden by passing a value to the default argument. """ encoding = _encodingMap.get(platformID, {}).get(platEncID, default) if isinstance(encoding, dict): encoding = encoding.get(langID, encoding[Ellipsis]) return encoding PKaZZZ8�ha�B�BfontTools/misc/etree.py"""Shim module exporting the same ElementTree API for lxml and xml.etree backends. When lxml is installed, it is automatically preferred over the built-in xml.etree module. On Python 2.7, the cElementTree module is preferred over the pure-python ElementTree module. Besides exporting a unified interface, this also defines extra functions or subclasses built-in ElementTree classes to add features that are only availble in lxml, like OrderedDict for attributes, pretty_print and iterwalk. 
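A minimal usage sketch (exact serialization details may differ between
the lxml and built-in backends)::

    from fontTools.misc import etree

    root = etree.Element("root")
    etree.SubElement(root, "child", attr="value")
    xml = etree.tostring(root, encoding="unicode", pretty_print=True)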
""" from fontTools.misc.textTools import tostr XML_DECLARATION = """<?xml version='1.0' encoding='%s'?>""" __all__ = [ # public symbols "Comment", "dump", "Element", "ElementTree", "fromstring", "fromstringlist", "iselement", "iterparse", "parse", "ParseError", "PI", "ProcessingInstruction", "QName", "SubElement", "tostring", "tostringlist", "TreeBuilder", "XML", "XMLParser", "register_namespace", ] try: from lxml.etree import * _have_lxml = True except ImportError: try: from xml.etree.cElementTree import * # the cElementTree version of XML function doesn't support # the optional 'parser' keyword argument from xml.etree.ElementTree import XML except ImportError: # pragma: no cover from xml.etree.ElementTree import * _have_lxml = False import sys # dict is always ordered in python >= 3.6 and on pypy PY36 = sys.version_info >= (3, 6) try: import __pypy__ except ImportError: __pypy__ = None _dict_is_ordered = bool(PY36 or __pypy__) del PY36, __pypy__ if _dict_is_ordered: _Attrib = dict else: from collections import OrderedDict as _Attrib if isinstance(Element, type): _Element = Element else: # in py27, cElementTree.Element cannot be subclassed, so # we need to import the pure-python class from xml.etree.ElementTree import Element as _Element class Element(_Element): """Element subclass that keeps the order of attributes.""" def __init__(self, tag, attrib=_Attrib(), **extra): super(Element, self).__init__(tag) self.attrib = _Attrib() if attrib: self.attrib.update(attrib) if extra: self.attrib.update(extra) def SubElement(parent, tag, attrib=_Attrib(), **extra): """Must override SubElement as well otherwise _elementtree.SubElement fails if 'parent' is a subclass of Element object. """ element = parent.__class__(tag, attrib, **extra) parent.append(element) return element def _iterwalk(element, events, tag): include = tag is None or element.tag == tag if include and "start" in events: yield ("start", element) for e in element: for item in _iterwalk(e, events, tag): yield item if include: yield ("end", element) def iterwalk(element_or_tree, events=("end",), tag=None): """A tree walker that generates events from an existing tree as if it was parsing XML data with iterparse(). Drop-in replacement for lxml.etree.iterwalk. """ if iselement(element_or_tree): element = element_or_tree else: element = element_or_tree.getroot() if tag == "*": tag = None for item in _iterwalk(element, events, tag): yield item _ElementTree = ElementTree class ElementTree(_ElementTree): """ElementTree subclass that adds 'pretty_print' and 'doctype' arguments to the 'write' method. Currently these are only supported for the default XML serialization 'method', and not also for "html" or "text", for these are delegated to the base class. 
""" def write( self, file_or_filename, encoding=None, xml_declaration=False, method=None, doctype=None, pretty_print=False, ): if method and method != "xml": # delegate to super-class super(ElementTree, self).write( file_or_filename, encoding=encoding, xml_declaration=xml_declaration, method=method, ) return if encoding is not None and encoding.lower() == "unicode": if xml_declaration: raise ValueError( "Serialisation to unicode must not request an XML declaration" ) write_declaration = False encoding = "unicode" elif xml_declaration is None: # by default, write an XML declaration only for non-standard encodings write_declaration = encoding is not None and encoding.upper() not in ( "ASCII", "UTF-8", "UTF8", "US-ASCII", ) else: write_declaration = xml_declaration if encoding is None: encoding = "ASCII" if pretty_print: # NOTE this will modify the tree in-place _indent(self._root) with _get_writer(file_or_filename, encoding) as write: if write_declaration: write(XML_DECLARATION % encoding.upper()) if pretty_print: write("\n") if doctype: write(_tounicode(doctype)) if pretty_print: write("\n") qnames, namespaces = _namespaces(self._root) _serialize_xml(write, self._root, qnames, namespaces) import io def tostring( element, encoding=None, xml_declaration=None, method=None, doctype=None, pretty_print=False, ): """Custom 'tostring' function that uses our ElementTree subclass, with pretty_print support. """ stream = io.StringIO() if encoding == "unicode" else io.BytesIO() ElementTree(element).write( stream, encoding=encoding, xml_declaration=xml_declaration, method=method, doctype=doctype, pretty_print=pretty_print, ) return stream.getvalue() # serialization support import re # Valid XML strings can include any Unicode character, excluding control # characters, the surrogate blocks, FFFE, and FFFF: # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] # Here we reversed the pattern to match only the invalid characters. # For the 'narrow' python builds supporting only UCS-2, which represent # characters beyond BMP as UTF-16 surrogate pairs, we need to pass through # the surrogate block. I haven't found a more elegant solution... UCS2 = sys.maxunicode < 0x10FFFF if UCS2: _invalid_xml_string = re.compile( "[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE-\uFFFF]" ) else: _invalid_xml_string = re.compile( "[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uD800-\uDFFF\uFFFE-\uFFFF]" ) def _tounicode(s): """Test if a string is valid user input and decode it to unicode string using ASCII encoding if it's a bytes string. Reject all bytes/unicode input that contains non-XML characters. Reject all bytes input that contains non-ASCII characters. """ try: s = tostr(s, encoding="ascii", errors="strict") except UnicodeDecodeError: raise ValueError( "Bytes strings can only contain ASCII characters. " "Use unicode strings for non-ASCII characters." 
) except AttributeError: _raise_serialization_error(s) if s and _invalid_xml_string.search(s): raise ValueError( "All strings must be XML compatible: Unicode or ASCII, " "no NULL bytes or control characters" ) return s import contextlib @contextlib.contextmanager def _get_writer(file_or_filename, encoding): # returns text write method and release all resources after using try: write = file_or_filename.write except AttributeError: # file_or_filename is a file name f = open( file_or_filename, "w", encoding="utf-8" if encoding == "unicode" else encoding, errors="xmlcharrefreplace", ) with f: yield f.write else: # file_or_filename is a file-like object # encoding determines if it is a text or binary writer if encoding == "unicode": # use a text writer as is yield write else: # wrap a binary writer with TextIOWrapper detach_buffer = False if isinstance(file_or_filename, io.BufferedIOBase): buf = file_or_filename elif isinstance(file_or_filename, io.RawIOBase): buf = io.BufferedWriter(file_or_filename) detach_buffer = True else: # This is to handle passed objects that aren't in the # IOBase hierarchy, but just have a write method buf = io.BufferedIOBase() buf.writable = lambda: True buf.write = write try: # TextIOWrapper uses this methods to determine # if BOM (for UTF-16, etc) should be added buf.seekable = file_or_filename.seekable buf.tell = file_or_filename.tell except AttributeError: pass wrapper = io.TextIOWrapper( buf, encoding=encoding, errors="xmlcharrefreplace", newline="\n", ) try: yield wrapper.write finally: # Keep the original file open when the TextIOWrapper and # the BufferedWriter are destroyed wrapper.detach() if detach_buffer: buf.detach() from xml.etree.ElementTree import _namespace_map def _namespaces(elem): # identify namespaces used in this tree # maps qnames to *encoded* prefix:local names qnames = {None: None} # maps uri:s to prefixes namespaces = {} def add_qname(qname): # calculate serialized qname representation try: qname = _tounicode(qname) if qname[:1] == "{": uri, tag = qname[1:].rsplit("}", 1) prefix = namespaces.get(uri) if prefix is None: prefix = _namespace_map.get(uri) if prefix is None: prefix = "ns%d" % len(namespaces) else: prefix = _tounicode(prefix) if prefix != "xml": namespaces[uri] = prefix if prefix: qnames[qname] = "%s:%s" % (prefix, tag) else: qnames[qname] = tag # default element else: qnames[qname] = qname except TypeError: _raise_serialization_error(qname) # populate qname and namespaces table for elem in elem.iter(): tag = elem.tag if isinstance(tag, QName): if tag.text not in qnames: add_qname(tag.text) elif isinstance(tag, str): if tag not in qnames: add_qname(tag) elif tag is not None and tag is not Comment and tag is not PI: _raise_serialization_error(tag) for key, value in elem.items(): if isinstance(key, QName): key = key.text if key not in qnames: add_qname(key) if isinstance(value, QName) and value.text not in qnames: add_qname(value.text) text = elem.text if isinstance(text, QName) and text.text not in qnames: add_qname(text.text) return qnames, namespaces def _serialize_xml(write, elem, qnames, namespaces, **kwargs): tag = elem.tag text = elem.text if tag is Comment: write("<!--%s-->" % _tounicode(text)) elif tag is ProcessingInstruction: write("<?%s?>" % _tounicode(text)) else: tag = qnames[_tounicode(tag) if tag is not None else None] if tag is None: if text: write(_escape_cdata(text)) for e in elem: _serialize_xml(write, e, qnames, None) else: write("<" + tag) if namespaces: for uri, prefix in sorted( namespaces.items(), key=lambda 
x: x[1] ): # sort on prefix if prefix: prefix = ":" + prefix write(' xmlns%s="%s"' % (prefix, _escape_attrib(uri))) attrs = elem.attrib if attrs: # try to keep existing attrib order if len(attrs) <= 1 or type(attrs) is _Attrib: items = attrs.items() else: # if plain dict, use lexical order items = sorted(attrs.items()) for k, v in items: if isinstance(k, QName): k = _tounicode(k.text) else: k = _tounicode(k) if isinstance(v, QName): v = qnames[_tounicode(v.text)] else: v = _escape_attrib(v) write(' %s="%s"' % (qnames[k], v)) if text is not None or len(elem): write(">") if text: write(_escape_cdata(text)) for e in elem: _serialize_xml(write, e, qnames, None) write("</" + tag + ">") else: write("/>") if elem.tail: write(_escape_cdata(elem.tail)) def _raise_serialization_error(text): raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__)) def _escape_cdata(text): # escape character data try: text = _tounicode(text) # it's worth avoiding do-nothing calls for short strings if "&" in text: text = text.replace("&", "&amp;") if "<" in text: text = text.replace("<", "&lt;") if ">" in text: text = text.replace(">", "&gt;") return text except (TypeError, AttributeError): _raise_serialization_error(text) def _escape_attrib(text): # escape attribute value try: text = _tounicode(text) if "&" in text: text = text.replace("&", "&amp;") if "<" in text: text = text.replace("<", "&lt;") if ">" in text: text = text.replace(">", "&gt;") if '"' in text: text = text.replace('"', "&quot;") if "\n" in text: text = text.replace("\n", "&#10;") return text except (TypeError, AttributeError): _raise_serialization_error(text) def _indent(elem, level=0): # From http://effbot.org/zone/element-lib.htm#prettyprint i = "\n" + level * " " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: _indent(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i PKaZZZ�h�2  fontTools/misc/filenames.py""" This module implements the algorithm for converting between a "user name" - something that a user can choose arbitrarily inside a font editor - and a file name suitable for use in a wide range of operating systems and filesystems. The `UFO 3 specification <http://unifiedfontobject.org/versions/ufo3/conventions/>`_ provides an example of an algorithm for such conversion, which avoids illegal characters, reserved file names, ambiguity between upper- and lower-case characters, and clashes with existing files. This code was originally copied from `ufoLib <https://github.com/unified-font-object/ufoLib/blob/8747da7/Lib/ufoLib/filenames.py>`_ by Tal Leming and is copyright (c) 2005-2016, The RoboFab Developers: - Erik van Blokland - Tal Leming - Just van Rossum """ illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ") illegalCharacters += [chr(i) for i in range(1, 32)] illegalCharacters += [chr(0x7F)] reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ") reservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ") maxFileNameLength = 255 class NameTranslationError(Exception): pass def userNameToFileName(userName, existing=[], prefix="", suffix=""): """Converts from a user name to a file name. Takes care to avoid illegal characters, reserved file names, ambiguity between upper- and lower-case characters, and clashes with existing files. Args: userName (str): The input file name. 
existing: A case-insensitive list of all existing file names. prefix: Prefix to be prepended to the file name. suffix: Suffix to be appended to the file name. Returns: A suitable filename. Raises: NameTranslationError: If no suitable name could be generated. Examples:: >>> userNameToFileName("a") == "a" True >>> userNameToFileName("A") == "A_" True >>> userNameToFileName("AE") == "A_E_" True >>> userNameToFileName("Ae") == "A_e" True >>> userNameToFileName("ae") == "ae" True >>> userNameToFileName("aE") == "aE_" True >>> userNameToFileName("a.alt") == "a.alt" True >>> userNameToFileName("A.alt") == "A_.alt" True >>> userNameToFileName("A.Alt") == "A_.A_lt" True >>> userNameToFileName("A.aLt") == "A_.aL_t" True >>> userNameToFileName(u"A.alT") == "A_.alT_" True >>> userNameToFileName("T_H") == "T__H_" True >>> userNameToFileName("T_h") == "T__h" True >>> userNameToFileName("t_h") == "t_h" True >>> userNameToFileName("F_F_I") == "F__F__I_" True >>> userNameToFileName("f_f_i") == "f_f_i" True >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash" True >>> userNameToFileName(".notdef") == "_notdef" True >>> userNameToFileName("con") == "_con" True >>> userNameToFileName("CON") == "C_O_N_" True >>> userNameToFileName("con.alt") == "_con.alt" True >>> userNameToFileName("alt.con") == "alt._con" True """ # the incoming name must be a str if not isinstance(userName, str): raise ValueError("The value for userName must be a string.") # establish the prefix and suffix lengths prefixLength = len(prefix) suffixLength = len(suffix) # replace an initial period with an _ # if no prefix is to be added if not prefix and userName[0] == ".": userName = "_" + userName[1:] # filter the user name filteredUserName = [] for character in userName: # replace illegal characters with _ if character in illegalCharacters: character = "_" # add _ to all non-lower characters elif character != character.lower(): character += "_" filteredUserName.append(character) userName = "".join(filteredUserName) # clip to 255 sliceLength = maxFileNameLength - prefixLength - suffixLength userName = userName[:sliceLength] # test for illegal files names parts = [] for part in userName.split("."): if part.lower() in reservedFileNames: part = "_" + part parts.append(part) userName = ".".join(parts) # test for clash fullName = prefix + userName + suffix if fullName.lower() in existing: fullName = handleClash1(userName, existing, prefix, suffix) # finished return fullName def handleClash1(userName, existing=[], prefix="", suffix=""): """ existing should be a case-insensitive list of all existing file names. >>> prefix = ("0" * 5) + "." >>> suffix = "." + ("0" * 10) >>> existing = ["a" * 5] >>> e = list(existing) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... '00000.AAAAA000000000000001.0000000000') True >>> e = list(existing) >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... '00000.AAAAA000000000000002.0000000000') True >>> e = list(existing) >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... 
'00000.AAAAA000000000000001.0000000000') True """ # if the prefix length + user name length + suffix length + 15 is at # or past the maximum length, silce 15 characters off of the user name prefixLength = len(prefix) suffixLength = len(suffix) if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength: l = prefixLength + len(userName) + suffixLength + 15 sliceLength = maxFileNameLength - l userName = userName[:sliceLength] finalName = None # try to add numbers to create a unique name counter = 1 while finalName is None: name = userName + str(counter).zfill(15) fullName = prefix + name + suffix if fullName.lower() not in existing: finalName = fullName break else: counter += 1 if counter >= 999999999999999: break # if there is a clash, go to the next fallback if finalName is None: finalName = handleClash2(existing, prefix, suffix) # finished return finalName def handleClash2(existing=[], prefix="", suffix=""): """ existing should be a case-insensitive list of all existing file names. >>> prefix = ("0" * 5) + "." >>> suffix = "." + ("0" * 10) >>> existing = [prefix + str(i) + suffix for i in range(100)] >>> e = list(existing) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... '00000.100.0000000000') True >>> e = list(existing) >>> e.remove(prefix + "1" + suffix) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... '00000.1.0000000000') True >>> e = list(existing) >>> e.remove(prefix + "2" + suffix) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... '00000.2.0000000000') True """ # calculate the longest possible string maxLength = maxFileNameLength - len(prefix) - len(suffix) maxValue = int("9" * maxLength) # try to find a number finalName = None counter = 1 while finalName is None: fullName = prefix + str(counter) + suffix if fullName.lower() not in existing: finalName = fullName break else: counter += 1 if counter >= maxValue: break # raise an error if nothing has been found if finalName is None: raise NameTranslationError("No unique name could be found.") # finished return finalName if __name__ == "__main__": import doctest import sys sys.exit(doctest.testmod().failed) PKaZZZp�:���fontTools/misc/fixedTools.py""" The `OpenType specification <https://docs.microsoft.com/en-us/typography/opentype/spec/otff#data-types>`_ defines two fixed-point data types: ``Fixed`` A 32-bit signed fixed-point number with a 16 bit twos-complement magnitude component and 16 fractional bits. ``F2DOT14`` A 16-bit signed fixed-point number with a 2 bit twos-complement magnitude component and 14 fractional bits. To support reading and writing data with these data types, this module provides functions for converting between fixed-point, float and string representations. .. data:: MAX_F2DOT14 The maximum value that can still fit in an F2Dot14. (1.99993896484375) """ from .roundTools import otRound, nearestMultipleShortestRepr import logging log = logging.getLogger(__name__) __all__ = [ "MAX_F2DOT14", "fixedToFloat", "floatToFixed", "floatToFixedToFloat", "floatToFixedToStr", "fixedToStr", "strToFixed", "strToFixedToFloat", "ensureVersionIsLong", "versionToFixed", ] MAX_F2DOT14 = 0x7FFF / (1 << 14) def fixedToFloat(value, precisionBits): """Converts a fixed-point number to a float given the number of precision bits. Args: value (int): Number in fixed-point format. precisionBits (int): Number of precision bits. Returns: Floating point value. 
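The conversion is a plain division by ``1 << precisionBits``; for
instance, in the 2.14 (F2Dot14) format (illustrative doctests)::

    >>> fixedToFloat(-(1 << 14), precisionBits=14)
    -1.0
    >>> fixedToFloat(0x7FFF, precisionBits=14) == MAX_F2DOT14
    True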
Examples:: >>> import math >>> f = fixedToFloat(-10139, precisionBits=14) >>> math.isclose(f, -0.61883544921875) True """ return value / (1 << precisionBits) def floatToFixed(value, precisionBits): """Converts a float to a fixed-point number given the number of precision bits. Args: value (float): Floating point value. precisionBits (int): Number of precision bits. Returns: int: Fixed-point representation. Examples:: >>> floatToFixed(-0.61883544921875, precisionBits=14) -10139 >>> floatToFixed(-0.61884, precisionBits=14) -10139 """ return otRound(value * (1 << precisionBits)) def floatToFixedToFloat(value, precisionBits): """Converts a float to a fixed-point number and back again. By converting the float to fixed, rounding it, and converting it back to float again, this returns a floating point values which is exactly representable in fixed-point format. Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``. Args: value (float): The input floating point value. precisionBits (int): Number of precision bits. Returns: float: The transformed and rounded value. Examples:: >>> import math >>> f1 = -0.61884 >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14) >>> f1 != f2 True >>> math.isclose(f2, -0.61883544921875) True """ scale = 1 << precisionBits return otRound(value * scale) / scale def fixedToStr(value, precisionBits): """Converts a fixed-point number to a string representing a decimal float. This chooses the float that has the shortest decimal representation (the least number of fractional decimal digits). For example, to convert a fixed-point number in a 2.14 format, use ``precisionBits=14``:: >>> fixedToStr(-10139, precisionBits=14) '-0.61884' This is pretty slow compared to the simple division used in ``fixedToFloat``. Use sporadically when you need to serialize or print the fixed-point number in a human-readable form. It uses nearestMultipleShortestRepr under the hood. Args: value (int): The fixed-point value to convert. precisionBits (int): Number of precision bits, *up to a maximum of 16*. Returns: str: A string representation of the value. """ scale = 1 << precisionBits return nearestMultipleShortestRepr(value / scale, factor=1.0 / scale) def strToFixed(string, precisionBits): """Converts a string representing a decimal float to a fixed-point number. Args: string (str): A string representing a decimal float. precisionBits (int): Number of precision bits, *up to a maximum of 16*. Returns: int: Fixed-point representation. Examples:: >>> ## to convert a float string to a 2.14 fixed-point number: >>> strToFixed('-0.61884', precisionBits=14) -10139 """ value = float(string) return otRound(value * (1 << precisionBits)) def strToFixedToFloat(string, precisionBits): """Convert a string to a decimal float with fixed-point rounding. This first converts string to a float, then turns it into a fixed-point number with ``precisionBits`` fractional binary digits, then back to a float again. This is simply a shorthand for fixedToFloat(floatToFixed(float(s))). Args: string (str): A string representing a decimal float. precisionBits (int): Number of precision bits. Returns: float: The transformed and rounded value. 
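    Stepping through the doctest below: ``float('-0.61884') * 16384`` gives
    ``-10139.07456``, which ``otRound`` rounds to ``-10139``; dividing back
    by ``16384`` yields the exactly representable ``-0.61883544921875``.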
Examples:: >>> import math >>> s = '-0.61884' >>> bits = 14 >>> f = strToFixedToFloat(s, precisionBits=bits) >>> math.isclose(f, -0.61883544921875) True >>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits) True """ value = float(string) scale = 1 << precisionBits return otRound(value * scale) / scale def floatToFixedToStr(value, precisionBits): """Convert float to string with fixed-point rounding. This uses the shortest decimal representation (ie. the least number of fractional decimal digits) to represent the equivalent fixed-point number with ``precisionBits`` fractional binary digits. It uses nearestMultipleShortestRepr under the hood. >>> floatToFixedToStr(-0.61883544921875, precisionBits=14) '-0.61884' Args: value (float): The float value to convert. precisionBits (int): Number of precision bits, *up to a maximum of 16*. Returns: str: A string representation of the value. """ scale = 1 << precisionBits return nearestMultipleShortestRepr(value, factor=1.0 / scale) def ensureVersionIsLong(value): """Ensure a table version is an unsigned long. OpenType table version numbers are expressed as a single unsigned long comprising of an unsigned short major version and unsigned short minor version. This function detects if the value to be used as a version number looks too small (i.e. is less than ``0x10000``), and converts it to fixed-point using :func:`floatToFixed` if so. Args: value (Number): a candidate table version number. Returns: int: A table version number, possibly corrected to fixed-point. """ if value < 0x10000: newValue = floatToFixed(value, 16) log.warning( "Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x", value, newValue, ) value = newValue return value def versionToFixed(value): """Ensure a table version number is fixed-point. Args: value (str): a candidate table version number. Returns: int: A table version number, possibly corrected to fixed-point. """ value = int(value, 0) if value.startswith("0") else float(value) value = ensureVersionIsLong(value) return value PKaZZZ��.JJfontTools/misc/intTools.py__all__ = ["popCount", "bit_count", "bit_indices"] try: bit_count = int.bit_count except AttributeError: def bit_count(v): return bin(v).count("1") """Return number of 1 bits (population count) of the absolute value of an integer. See https://docs.python.org/3.10/library/stdtypes.html#int.bit_count """ popCount = bit_count # alias def bit_indices(v): """Return list of indices where bits are set, 0 being the index of the least significant bit. >>> bit_indices(0b101) [0, 2] """ return [i for i, b in enumerate(bin(v)[::-1]) if b == "1"] PKaZZZs��)�M�MfontTools/misc/loggingTools.pyimport sys import logging import timeit from functools import wraps from collections.abc import Mapping, Callable import warnings from logging import PercentStyle # default logging level used by Timer class TIME_LEVEL = logging.DEBUG # per-level format strings used by the default formatter # (the level name is not printed for INFO and DEBUG messages) DEFAULT_FORMATS = { "*": "%(levelname)s: %(message)s", "INFO": "%(message)s", "DEBUG": "%(message)s", } class LevelFormatter(logging.Formatter): """Log formatter with level-specific formatting. Formatter class which optionally takes a dict of logging levels to format strings, allowing to customise the log records appearance for specific levels. Attributes: fmt: A dictionary mapping logging levels to format strings. The ``*`` key identifies the default format string. 
datefmt: As per py:class:`logging.Formatter` style: As per py:class:`logging.Formatter` >>> import sys >>> handler = logging.StreamHandler(sys.stdout) >>> formatter = LevelFormatter( ... fmt={ ... '*': '[%(levelname)s] %(message)s', ... 'DEBUG': '%(name)s [%(levelname)s] %(message)s', ... 'INFO': '%(message)s', ... }) >>> handler.setFormatter(formatter) >>> log = logging.getLogger('test') >>> log.setLevel(logging.DEBUG) >>> log.addHandler(handler) >>> log.debug('this uses a custom format string') test [DEBUG] this uses a custom format string >>> log.info('this also uses a custom format string') this also uses a custom format string >>> log.warning("this one uses the default format string") [WARNING] this one uses the default format string """ def __init__(self, fmt=None, datefmt=None, style="%"): if style != "%": raise ValueError( "only '%' percent style is supported in both python 2 and 3" ) if fmt is None: fmt = DEFAULT_FORMATS if isinstance(fmt, str): default_format = fmt custom_formats = {} elif isinstance(fmt, Mapping): custom_formats = dict(fmt) default_format = custom_formats.pop("*", None) else: raise TypeError("fmt must be a str or a dict of str: %r" % fmt) super(LevelFormatter, self).__init__(default_format, datefmt) self.default_format = self._fmt self.custom_formats = {} for level, fmt in custom_formats.items(): level = logging._checkLevel(level) self.custom_formats[level] = fmt def format(self, record): if self.custom_formats: fmt = self.custom_formats.get(record.levelno, self.default_format) if self._fmt != fmt: self._fmt = fmt # for python >= 3.2, _style needs to be set if _fmt changes if PercentStyle: self._style = PercentStyle(fmt) return super(LevelFormatter, self).format(record) def configLogger(**kwargs): """A more sophisticated logging system configuation manager. This is more or less the same as :py:func:`logging.basicConfig`, with some additional options and defaults. The default behaviour is to create a ``StreamHandler`` which writes to sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add the handler to the top-level library logger ("fontTools"). A number of optional keyword arguments may be specified, which can alter the default behaviour. Args: logger: Specifies the logger name or a Logger instance to be configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``, this function can be called multiple times to reconfigure a logger. If the logger or any of its children already exists before the call is made, they will be reset before the new configuration is applied. filename: Specifies that a ``FileHandler`` be created, using the specified filename, rather than a ``StreamHandler``. filemode: Specifies the mode to open the file, if filename is specified. (If filemode is unspecified, it defaults to ``a``). format: Use the specified format string for the handler. This argument also accepts a dictionary of format strings keyed by level name, to allow customising the records appearance for specific levels. The special ``'*'`` key is for 'any other' level. datefmt: Use the specified date/time format. level: Set the logger level to the specified level. stream: Use the specified stream to initialize the StreamHandler. Note that this argument is incompatible with ``filename`` - if both are present, ``stream`` is ignored. handlers: If specified, this should be an iterable of already created handlers, which will be added to the logger. 
        Any handler in the list which does not have a formatter assigned
        will be assigned the formatter created in this function.
    filters: If specified, this should be an iterable of already created
        filters. If the ``handlers`` do not already have filters assigned,
        these filters will be added to them.
    propagate: All loggers have a ``propagate`` attribute which determines
        whether to continue searching for handlers up the logging hierarchy.
        If not provided, the "propagate" attribute will be set to ``False``.
    """
    # using kwargs to enforce keyword-only arguments in py2.
    handlers = kwargs.pop("handlers", None)
    if handlers is None:
        if "stream" in kwargs and "filename" in kwargs:
            raise ValueError(
                "'stream' and 'filename' should not be " "specified together"
            )
    else:
        if "stream" in kwargs or "filename" in kwargs:
            raise ValueError(
                "'stream' or 'filename' should not be "
                "specified together with 'handlers'"
            )
    if handlers is None:
        filename = kwargs.pop("filename", None)
        mode = kwargs.pop("filemode", "a")
        if filename:
            h = logging.FileHandler(filename, mode)
        else:
            stream = kwargs.pop("stream", None)
            h = logging.StreamHandler(stream)
        handlers = [h]
    # By default, the top-level library logger is configured.
    logger = kwargs.pop("logger", "fontTools")
    if not logger or isinstance(logger, str):
        # empty "" or None means the 'root' logger
        logger = logging.getLogger(logger)
    # before (re)configuring, reset named logger and its children (if exist)
    _resetExistingLoggers(parent=logger.name)
    # use DEFAULT_FORMATS if 'format' is None
    fs = kwargs.pop("format", None)
    dfs = kwargs.pop("datefmt", None)
    # XXX: '%' is the only format style supported on both py2 and 3
    style = kwargs.pop("style", "%")
    fmt = LevelFormatter(fs, dfs, style)
    filters = kwargs.pop("filters", [])
    for h in handlers:
        if h.formatter is None:
            h.setFormatter(fmt)
        if not h.filters:
            for f in filters:
                h.addFilter(f)
        logger.addHandler(h)
    if logger.name != "root":
        # stop searching up the hierarchy for handlers
        logger.propagate = kwargs.pop("propagate", False)
    # set a custom severity level
    level = kwargs.pop("level", None)
    if level is not None:
        logger.setLevel(level)
    if kwargs:
        keys = ", ".join(kwargs.keys())
        raise ValueError("Unrecognised argument(s): %s" % keys)


def _resetExistingLoggers(parent="root"):
    """Reset the logger named 'parent' and all its children to their
    initial state, if they already exist in the current configuration.
    """
    root = logging.root
    # get sorted list of all existing loggers
    existing = sorted(root.manager.loggerDict.keys())
    if parent == "root":
        # all the existing loggers are children of 'root'
        loggers_to_reset = [parent] + existing
    elif parent not in existing:
        # nothing to do
        return
    elif parent in existing:
        loggers_to_reset = [parent]
        # collect children, starting with the entry after parent name
        i = existing.index(parent) + 1
        prefixed = parent + "."
        pflen = len(prefixed)
        num_existing = len(existing)
        while i < num_existing:
            if existing[i][:pflen] == prefixed:
                loggers_to_reset.append(existing[i])
            i += 1
    for name in loggers_to_reset:
        if name == "root":
            root.setLevel(logging.WARNING)
            for h in root.handlers[:]:
                root.removeHandler(h)
            for f in root.filters[:]:
                # Logger/Filterer has no 'removeFilters' method; the
                # singular 'removeFilter' is the correct API.
                root.removeFilter(f)
            root.disabled = False
        else:
            logger = root.manager.loggerDict[name]
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.filters = []
            logger.propagate = True
            logger.disabled = False


class Timer(object):
    """Keeps track of overall time and split/lap times.

    >>> import time
    >>> timer = Timer()
    >>> time.sleep(0.01)
    >>> print("First lap:", timer.split())
    First lap: ...
>>> time.sleep(0.02) >>> print("Second lap:", timer.split()) Second lap: ... >>> print("Overall time:", timer.time()) Overall time: ... Can be used as a context manager inside with-statements. >>> with Timer() as t: ... time.sleep(0.01) >>> print("%0.3f seconds" % t.elapsed) 0... seconds If initialised with a logger, it can log the elapsed time automatically upon exiting the with-statement. >>> import logging >>> log = logging.getLogger("my-fancy-timer-logger") >>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout) >>> with Timer(log, 'do something'): ... time.sleep(0.01) Took ... to do something The same Timer instance, holding a reference to a logger, can be reused in multiple with-statements, optionally with different messages or levels. >>> timer = Timer(log) >>> with timer(): ... time.sleep(0.01) elapsed time: ...s >>> with timer('redo it', level=logging.INFO): ... time.sleep(0.02) Took ... to redo it It can also be used as a function decorator to log the time elapsed to run the decorated function. >>> @timer() ... def test1(): ... time.sleep(0.01) >>> @timer('run test 2', level=logging.INFO) ... def test2(): ... time.sleep(0.02) >>> test1() Took ... to run 'test1' >>> test2() Took ... to run test 2 """ # timeit.default_timer choses the most accurate clock for each platform _time = timeit.default_timer default_msg = "elapsed time: %(time).3fs" default_format = "Took %(time).3fs to %(msg)s" def __init__(self, logger=None, msg=None, level=None, start=None): self.reset(start) if logger is None: for arg in ("msg", "level"): if locals().get(arg) is not None: raise ValueError("'%s' can't be specified without a 'logger'" % arg) self.logger = logger self.level = level if level is not None else TIME_LEVEL self.msg = msg def reset(self, start=None): """Reset timer to 'start_time' or the current time.""" if start is None: self.start = self._time() else: self.start = start self.last = self.start self.elapsed = 0.0 def time(self): """Return the overall time (in seconds) since the timer started.""" return self._time() - self.start def split(self): """Split and return the lap time (in seconds) in between splits.""" current = self._time() self.elapsed = current - self.last self.last = current return self.elapsed def formatTime(self, msg, time): """Format 'time' value in 'msg' and return formatted string. If 'msg' contains a '%(time)' format string, try to use that. Otherwise, use the predefined 'default_format'. If 'msg' is empty or None, fall back to 'default_msg'. """ if not msg: msg = self.default_msg if msg.find("%(time)") < 0: msg = self.default_format % {"msg": msg, "time": time} else: try: msg = msg % {"time": time} except (KeyError, ValueError): pass # skip if the format string is malformed return msg def __enter__(self): """Start a new lap""" self.last = self._time() self.elapsed = 0.0 return self def __exit__(self, exc_type, exc_value, traceback): """End the current lap. If timer has a logger, log the time elapsed, using the format string in self.msg (or the default one). """ time = self.split() if self.logger is None or exc_type: # if there's no logger attached, or if any exception occurred in # the with-statement, exit without logging the time return message = self.formatTime(self.msg, time) # Allow log handlers to see the individual parts to facilitate things # like a server accumulating aggregate stats. 
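        # (A lone mapping passed as the logging argument survives as
        # ``record.args`` on the emitted LogRecord, so a custom handler can
        # read ``record.args["time"]`` directly.)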
msg_parts = {"msg": self.msg, "time": time} self.logger.log(self.level, message, msg_parts) def __call__(self, func_or_msg=None, **kwargs): """If the first argument is a function, return a decorator which runs the wrapped function inside Timer's context manager. Otherwise, treat the first argument as a 'msg' string and return an updated Timer instance, referencing the same logger. A 'level' keyword can also be passed to override self.level. """ if isinstance(func_or_msg, Callable): func = func_or_msg # use the function name when no explicit 'msg' is provided if not self.msg: self.msg = "run '%s'" % func.__name__ @wraps(func) def wrapper(*args, **kwds): with self: return func(*args, **kwds) return wrapper else: msg = func_or_msg or kwargs.get("msg") level = kwargs.get("level", self.level) return self.__class__(self.logger, msg, level) def __float__(self): return self.elapsed def __int__(self): return int(self.elapsed) def __str__(self): return "%.3f" % self.elapsed class ChannelsFilter(logging.Filter): """Provides a hierarchical filter for log entries based on channel names. Filters out records emitted from a list of enabled channel names, including their children. It works the same as the ``logging.Filter`` class, but allows the user to specify multiple channel names. >>> import sys >>> handler = logging.StreamHandler(sys.stdout) >>> handler.setFormatter(logging.Formatter("%(message)s")) >>> filter = ChannelsFilter("A.B", "C.D") >>> handler.addFilter(filter) >>> root = logging.getLogger() >>> root.addHandler(handler) >>> root.setLevel(level=logging.DEBUG) >>> logging.getLogger('A.B').debug('this record passes through') this record passes through >>> logging.getLogger('A.B.C').debug('records from children also pass') records from children also pass >>> logging.getLogger('C.D').debug('this one as well') this one as well >>> logging.getLogger('A.B.').debug('also this one') also this one >>> logging.getLogger('A.F').debug('but this one does not!') >>> logging.getLogger('C.DE').debug('neither this one!') """ def __init__(self, *names): self.names = names self.num = len(names) self.lengths = {n: len(n) for n in names} def filter(self, record): if self.num == 0: return True for name in self.names: nlen = self.lengths[name] if name == record.name: return True elif record.name.find(name, 0, nlen) == 0 and record.name[nlen] == ".": return True return False class CapturingLogHandler(logging.Handler): def __init__(self, logger, level): super(CapturingLogHandler, self).__init__(level=level) self.records = [] if isinstance(logger, str): self.logger = logging.getLogger(logger) else: self.logger = logger def __enter__(self): self.original_disabled = self.logger.disabled self.original_level = self.logger.level self.original_propagate = self.logger.propagate self.logger.addHandler(self) self.logger.setLevel(self.level) self.logger.disabled = False self.logger.propagate = False return self def __exit__(self, type, value, traceback): self.logger.removeHandler(self) self.logger.setLevel(self.original_level) self.logger.disabled = self.original_disabled self.logger.propagate = self.original_propagate return self def emit(self, record): self.records.append(record) def assertRegex(self, regexp, msg=None): import re pattern = re.compile(regexp) for r in self.records: if pattern.search(r.getMessage()): return True if msg is None: msg = "Pattern '%s' not found in logger records" % regexp assert 0, msg class LogMixin(object): """Mixin class that adds logging functionality to another class. 
You can define a new class that subclasses from ``LogMixin`` as well as other base classes through multiple inheritance. All instances of that class will have a ``log`` property that returns a ``logging.Logger`` named after their respective ``<module>.<class>``. For example: >>> class BaseClass(object): ... pass >>> class MyClass(LogMixin, BaseClass): ... pass >>> a = MyClass() >>> isinstance(a.log, logging.Logger) True >>> print(a.log.name) fontTools.misc.loggingTools.MyClass >>> class AnotherClass(MyClass): ... pass >>> b = AnotherClass() >>> isinstance(b.log, logging.Logger) True >>> print(b.log.name) fontTools.misc.loggingTools.AnotherClass """ @property def log(self): if not hasattr(self, "_log"): name = ".".join((self.__class__.__module__, self.__class__.__name__)) self._log = logging.getLogger(name) return self._log def deprecateArgument(name, msg, category=UserWarning): """Raise a warning about deprecated function argument 'name'.""" warnings.warn("%r is deprecated; %s" % (name, msg), category=category, stacklevel=3) def deprecateFunction(msg, category=UserWarning): """Decorator to raise a warning when a deprecated function is called.""" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): warnings.warn( "%r is deprecated; %s" % (func.__name__, msg), category=category, stacklevel=2, ) return func(*args, **kwargs) return wrapper return decorator if __name__ == "__main__": import doctest sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed) PKaZZZU�@�99 fontTools/misc/macCreatorType.pyfrom fontTools.misc.textTools import Tag, bytesjoin, strjoin try: import xattr except ImportError: xattr = None def _reverseString(s): s = list(s) s.reverse() return strjoin(s) def getMacCreatorAndType(path): """Returns file creator and file type codes for a path. Args: path (str): A file path. Returns: A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first representing the file creator and the second representing the file type. """ if xattr is not None: try: finderInfo = xattr.getxattr(path, "com.apple.FinderInfo") except (KeyError, IOError): pass else: fileType = Tag(finderInfo[:4]) fileCreator = Tag(finderInfo[4:8]) return fileCreator, fileType return None, None def setMacCreatorAndType(path, fileCreator, fileType): """Set file creator and file type codes for a path. Note that if the ``xattr`` module is not installed, no action is taken but no error is raised. Args: path (str): A file path. fileCreator: A four-character file creator tag. fileType: A four-character file type tag. """ if xattr is not None: from fontTools.misc.textTools import pad if not all(len(s) == 4 for s in (fileCreator, fileType)): raise TypeError("arg must be string of 4 chars") finderInfo = pad(bytesjoin([fileType, fileCreator]), 32) xattr.setxattr(path, "com.apple.FinderInfo", finderInfo) PKaZZZ3g�&�!�!fontTools/misc/macRes.pyfrom io import BytesIO import struct from fontTools.misc import sstruct from fontTools.misc.textTools import bytesjoin, tostr from collections import OrderedDict from collections.abc import MutableMapping class ResourceError(Exception): pass class ResourceReader(MutableMapping): """Reader for Mac OS resource forks. Parses a resource fork and returns resources according to their type. If run on OS X, this will open the resource fork in the filesystem. Otherwise, it will open the file itself and attempt to read it as though it were a resource fork. 
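    A minimal usage sketch (the file name below is hypothetical, not a
    bundled sample)::

        from fontTools.misc.macRes import ResourceReader

        reader = ResourceReader("MyFont.dfont")
        print(reader.types)                  # e.g. ['FOND', 'sfnt']
        print(reader.countResources("sfnt"))
        reader.close()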
The returned object can be indexed by type and iterated over, returning in each case a list of py:class:`Resource` objects representing all the resources of a certain type. """ def __init__(self, fileOrPath): """Open a file Args: fileOrPath: Either an object supporting a ``read`` method, an ``os.PathLike`` object, or a string. """ self._resources = OrderedDict() if hasattr(fileOrPath, "read"): self.file = fileOrPath else: try: # try reading from the resource fork (only works on OS X) self.file = self.openResourceFork(fileOrPath) self._readFile() return except (ResourceError, IOError): # if it fails, use the data fork self.file = self.openDataFork(fileOrPath) self._readFile() @staticmethod def openResourceFork(path): if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() with open(path + "/..namedfork/rsrc", "rb") as resfork: data = resfork.read() infile = BytesIO(data) infile.name = path return infile @staticmethod def openDataFork(path): with open(path, "rb") as datafork: data = datafork.read() infile = BytesIO(data) infile.name = path return infile def _readFile(self): self._readHeaderAndMap() self._readTypeList() def _read(self, numBytes, offset=None): if offset is not None: try: self.file.seek(offset) except OverflowError: raise ResourceError("Failed to seek offset ('offset' is too large)") if self.file.tell() != offset: raise ResourceError("Failed to seek offset (reached EOF)") try: data = self.file.read(numBytes) except OverflowError: raise ResourceError("Cannot read resource ('numBytes' is too large)") if len(data) != numBytes: raise ResourceError("Cannot read resource (not enough data)") return data def _readHeaderAndMap(self): self.file.seek(0) headerData = self._read(ResourceForkHeaderSize) sstruct.unpack(ResourceForkHeader, headerData, self) # seek to resource map, skip reserved mapOffset = self.mapOffset + 22 resourceMapData = self._read(ResourceMapHeaderSize, mapOffset) sstruct.unpack(ResourceMapHeader, resourceMapData, self) self.absTypeListOffset = self.mapOffset + self.typeListOffset self.absNameListOffset = self.mapOffset + self.nameListOffset def _readTypeList(self): absTypeListOffset = self.absTypeListOffset numTypesData = self._read(2, absTypeListOffset) (self.numTypes,) = struct.unpack(">H", numTypesData) absTypeListOffset2 = absTypeListOffset + 2 for i in range(self.numTypes + 1): resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset) item = sstruct.unpack(ResourceTypeItem, resTypeItemData) resType = tostr(item["type"], encoding="mac-roman") refListOffset = absTypeListOffset + item["refListOffset"] numRes = item["numRes"] + 1 resources = self._readReferenceList(resType, refListOffset, numRes) self._resources[resType] = resources def _readReferenceList(self, resType, refListOffset, numRes): resources = [] for i in range(numRes): refOffset = refListOffset + ResourceRefItemSize * i refData = self._read(ResourceRefItemSize, refOffset) res = Resource(resType) res.decompile(refData, self) resources.append(res) return resources def __getitem__(self, resType): return self._resources[resType] def __delitem__(self, resType): del self._resources[resType] def __setitem__(self, resType, resources): self._resources[resType] = resources def __len__(self): return len(self._resources) def __iter__(self): return iter(self._resources) def keys(self): return self._resources.keys() @property def types(self): """A list of the types of resources in the resource fork.""" return 
list(self._resources.keys()) def countResources(self, resType): """Return the number of resources of a given type.""" try: return len(self[resType]) except KeyError: return 0 def getIndices(self, resType): """Returns a list of indices of resources of a given type.""" numRes = self.countResources(resType) if numRes: return list(range(1, numRes + 1)) else: return [] def getNames(self, resType): """Return list of names of all resources of a given type.""" return [res.name for res in self.get(resType, []) if res.name is not None] def getIndResource(self, resType, index): """Return resource of given type located at an index ranging from 1 to the number of resources for that type, or None if not found. """ if index < 1: return None try: res = self[resType][index - 1] except (KeyError, IndexError): return None return res def getNamedResource(self, resType, name): """Return the named resource of given type, else return None.""" name = tostr(name, encoding="mac-roman") for res in self.get(resType, []): if res.name == name: return res return None def close(self): if not self.file.closed: self.file.close() class Resource(object): """Represents a resource stored within a resource fork. Attributes: type: resource type. data: resource data. id: ID. name: resource name. attr: attributes. """ def __init__( self, resType=None, resData=None, resID=None, resName=None, resAttr=None ): self.type = resType self.data = resData self.id = resID self.name = resName self.attr = resAttr def decompile(self, refData, reader): sstruct.unpack(ResourceRefItem, refData, self) # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct (self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset])) absDataOffset = reader.dataOffset + self.dataOffset (dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset)) self.data = reader._read(dataLength) if self.nameOffset == -1: return absNameOffset = reader.absNameListOffset + self.nameOffset (nameLength,) = struct.unpack("B", reader._read(1, absNameOffset)) (name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength)) self.name = tostr(name, encoding="mac-roman") ResourceForkHeader = """ > # big endian dataOffset: L mapOffset: L dataLen: L mapLen: L """ ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader) ResourceMapHeader = """ > # big endian attr: H typeListOffset: H nameListOffset: H """ ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader) ResourceTypeItem = """ > # big endian type: 4s numRes: H refListOffset: H """ ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem) ResourceRefItem = """ > # big endian id: h nameOffset: h attr: B dataOffset: 3s reserved: L """ ResourceRefItemSize = sstruct.calcsize(ResourceRefItem) PKaZZZ��bv��fontTools/misc/psCharStrings.py"""psCharStrings.py -- module implementing various kinds of CharStrings: CFF dictionary data and Type1/Type2 CharStrings. 
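For example, the Type 2 integer encoder defined in this module always picks
the shortest of the available byte forms:

>>> from fontTools.misc.psCharStrings import encodeIntT2
>>> encodeIntT2(0) == bytes([139])
True
>>> len(encodeIntT2(107)), len(encodeIntT2(1000)), len(encodeIntT2(2000))
(1, 2, 3)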
""" from fontTools.misc.fixedTools import ( fixedToFloat, floatToFixed, floatToFixedToStr, strToFixedToFloat, ) from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin from fontTools.pens.boundsPen import BoundsPen import struct import logging log = logging.getLogger(__name__) def read_operator(self, b0, data, index): if b0 == 12: op = (b0, byteord(data[index])) index = index + 1 else: op = b0 try: operator = self.operators[op] except KeyError: return None, index value = self.handle_operator(operator) return value, index def read_byte(self, b0, data, index): return b0 - 139, index def read_smallInt1(self, b0, data, index): b1 = byteord(data[index]) return (b0 - 247) * 256 + b1 + 108, index + 1 def read_smallInt2(self, b0, data, index): b1 = byteord(data[index]) return -(b0 - 251) * 256 - b1 - 108, index + 1 def read_shortInt(self, b0, data, index): (value,) = struct.unpack(">h", data[index : index + 2]) return value, index + 2 def read_longInt(self, b0, data, index): (value,) = struct.unpack(">l", data[index : index + 4]) return value, index + 4 def read_fixed1616(self, b0, data, index): (value,) = struct.unpack(">l", data[index : index + 4]) return fixedToFloat(value, precisionBits=16), index + 4 def read_reserved(self, b0, data, index): assert NotImplementedError return NotImplemented, index def read_realNumber(self, b0, data, index): number = "" while True: b = byteord(data[index]) index = index + 1 nibble0 = (b & 0xF0) >> 4 nibble1 = b & 0x0F if nibble0 == 0xF: break number = number + realNibbles[nibble0] if nibble1 == 0xF: break number = number + realNibbles[nibble1] return float(number), index t1OperandEncoding = [None] * 256 t1OperandEncoding[0:32] = (32) * [read_operator] t1OperandEncoding[32:247] = (247 - 32) * [read_byte] t1OperandEncoding[247:251] = (251 - 247) * [read_smallInt1] t1OperandEncoding[251:255] = (255 - 251) * [read_smallInt2] t1OperandEncoding[255] = read_longInt assert len(t1OperandEncoding) == 256 t2OperandEncoding = t1OperandEncoding[:] t2OperandEncoding[28] = read_shortInt t2OperandEncoding[255] = read_fixed1616 cffDictOperandEncoding = t2OperandEncoding[:] cffDictOperandEncoding[29] = read_longInt cffDictOperandEncoding[30] = read_realNumber cffDictOperandEncoding[255] = read_reserved realNibbles = [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ".", "E", "E-", None, "-", ] realNibblesDict = {v: i for i, v in enumerate(realNibbles)} maxOpStack = 193 def buildOperatorDict(operatorList): oper = {} opc = {} for item in operatorList: if len(item) == 2: oper[item[0]] = item[1] else: oper[item[0]] = item[1:] if isinstance(item[0], tuple): opc[item[1]] = item[0] else: opc[item[1]] = (item[0],) return oper, opc t2Operators = [ # opcode name (1, "hstem"), (3, "vstem"), (4, "vmoveto"), (5, "rlineto"), (6, "hlineto"), (7, "vlineto"), (8, "rrcurveto"), (10, "callsubr"), (11, "return"), (14, "endchar"), (15, "vsindex"), (16, "blend"), (18, "hstemhm"), (19, "hintmask"), (20, "cntrmask"), (21, "rmoveto"), (22, "hmoveto"), (23, "vstemhm"), (24, "rcurveline"), (25, "rlinecurve"), (26, "vvcurveto"), (27, "hhcurveto"), # (28, 'shortint'), # not really an operator (29, "callgsubr"), (30, "vhcurveto"), (31, "hvcurveto"), ((12, 0), "ignore"), # dotsection. Yes, there a few very early OTF/CFF # fonts with this deprecated operator. Just ignore it. 
((12, 3), "and"), ((12, 4), "or"), ((12, 5), "not"), ((12, 8), "store"), ((12, 9), "abs"), ((12, 10), "add"), ((12, 11), "sub"), ((12, 12), "div"), ((12, 13), "load"), ((12, 14), "neg"), ((12, 15), "eq"), ((12, 18), "drop"), ((12, 20), "put"), ((12, 21), "get"), ((12, 22), "ifelse"), ((12, 23), "random"), ((12, 24), "mul"), ((12, 26), "sqrt"), ((12, 27), "dup"), ((12, 28), "exch"), ((12, 29), "index"), ((12, 30), "roll"), ((12, 34), "hflex"), ((12, 35), "flex"), ((12, 36), "hflex1"), ((12, 37), "flex1"), ] def getIntEncoder(format): if format == "cff": twoByteOp = bytechr(28) fourByteOp = bytechr(29) elif format == "t1": twoByteOp = None fourByteOp = bytechr(255) else: assert format == "t2" twoByteOp = bytechr(28) fourByteOp = None def encodeInt( value, fourByteOp=fourByteOp, bytechr=bytechr, pack=struct.pack, unpack=struct.unpack, twoByteOp=twoByteOp, ): if -107 <= value <= 107: code = bytechr(value + 139) elif 108 <= value <= 1131: value = value - 108 code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF) elif -1131 <= value <= -108: value = -value - 108 code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF) elif twoByteOp is not None and -32768 <= value <= 32767: code = twoByteOp + pack(">h", value) elif fourByteOp is None: # Backwards compatible hack: due to a previous bug in FontTools, # 16.16 fixed numbers were written out as 4-byte ints. When # these numbers were small, they were wrongly written back as # small ints instead of 4-byte ints, breaking round-tripping. # This here workaround doesn't do it any better, since we can't # distinguish anymore between small ints that were supposed to # be small fixed numbers and small ints that were just small # ints. Hence the warning. log.warning( "4-byte T2 number got passed to the " "IntType handler. This should happen only when reading in " "old XML files.\n" ) code = bytechr(255) + pack(">l", value) else: code = fourByteOp + pack(">l", value) return code return encodeInt encodeIntCFF = getIntEncoder("cff") encodeIntT1 = getIntEncoder("t1") encodeIntT2 = getIntEncoder("t2") def encodeFixed(f, pack=struct.pack): """For T2 only""" value = floatToFixed(f, precisionBits=16) if value & 0xFFFF == 0: # check if the fractional part is zero return encodeIntT2(value >> 16) # encode only the integer part else: return b"\xff" + pack(">l", value) # encode the entire fixed point value realZeroBytes = bytechr(30) + bytechr(0xF) def encodeFloat(f): # For CFF only, used in cffLib if f == 0.0: # 0.0 == +0.0 == -0.0 return realZeroBytes # Note: 14 decimal digits seems to be the limitation for CFF real numbers # in macOS. However, we use 8 here to match the implementation of AFDKO. 
s = "%.8G" % f if s[:2] == "0.": s = s[1:] elif s[:3] == "-0.": s = "-" + s[2:] nibbles = [] while s: c = s[0] s = s[1:] if c == "E": c2 = s[:1] if c2 == "-": s = s[1:] c = "E-" elif c2 == "+": s = s[1:] nibbles.append(realNibblesDict[c]) nibbles.append(0xF) if len(nibbles) % 2: nibbles.append(0xF) d = bytechr(30) for i in range(0, len(nibbles), 2): d = d + bytechr(nibbles[i] << 4 | nibbles[i + 1]) return d class CharStringCompileError(Exception): pass class SimpleT2Decompiler(object): def __init__(self, localSubrs, globalSubrs, private=None, blender=None): self.localSubrs = localSubrs self.localBias = calcSubrBias(localSubrs) self.globalSubrs = globalSubrs self.globalBias = calcSubrBias(globalSubrs) self.private = private self.blender = blender self.reset() def reset(self): self.callingStack = [] self.operandStack = [] self.hintCount = 0 self.hintMaskBytes = 0 self.numRegions = 0 self.vsIndex = 0 def execute(self, charString): self.callingStack.append(charString) needsDecompilation = charString.needsDecompilation() if needsDecompilation: program = [] pushToProgram = program.append else: pushToProgram = lambda x: None pushToStack = self.operandStack.append index = 0 while True: token, isOperator, index = charString.getToken(index) if token is None: break # we're done! pushToProgram(token) if isOperator: handlerName = "op_" + token handler = getattr(self, handlerName, None) if handler is not None: rv = handler(index) if rv: hintMaskBytes, index = rv pushToProgram(hintMaskBytes) else: self.popall() else: pushToStack(token) if needsDecompilation: charString.setProgram(program) del self.callingStack[-1] def pop(self): value = self.operandStack[-1] del self.operandStack[-1] return value def popall(self): stack = self.operandStack[:] self.operandStack[:] = [] return stack def push(self, value): self.operandStack.append(value) def op_return(self, index): if self.operandStack: pass def op_endchar(self, index): pass def op_ignore(self, index): pass def op_callsubr(self, index): subrIndex = self.pop() subr = self.localSubrs[subrIndex + self.localBias] self.execute(subr) def op_callgsubr(self, index): subrIndex = self.pop() subr = self.globalSubrs[subrIndex + self.globalBias] self.execute(subr) def op_hstem(self, index): self.countHints() def op_vstem(self, index): self.countHints() def op_hstemhm(self, index): self.countHints() def op_vstemhm(self, index): self.countHints() def op_hintmask(self, index): if not self.hintMaskBytes: self.countHints() self.hintMaskBytes = (self.hintCount + 7) // 8 hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes) return hintMaskBytes, index op_cntrmask = op_hintmask def countHints(self): args = self.popall() self.hintCount = self.hintCount + len(args) // 2 # misc def op_and(self, index): raise NotImplementedError def op_or(self, index): raise NotImplementedError def op_not(self, index): raise NotImplementedError def op_store(self, index): raise NotImplementedError def op_abs(self, index): raise NotImplementedError def op_add(self, index): raise NotImplementedError def op_sub(self, index): raise NotImplementedError def op_div(self, index): raise NotImplementedError def op_load(self, index): raise NotImplementedError def op_neg(self, index): raise NotImplementedError def op_eq(self, index): raise NotImplementedError def op_drop(self, index): raise NotImplementedError def op_put(self, index): raise NotImplementedError def op_get(self, index): raise NotImplementedError def op_ifelse(self, index): raise NotImplementedError def op_random(self, 
index): raise NotImplementedError def op_mul(self, index): raise NotImplementedError def op_sqrt(self, index): raise NotImplementedError def op_dup(self, index): raise NotImplementedError def op_exch(self, index): raise NotImplementedError def op_index(self, index): raise NotImplementedError def op_roll(self, index): raise NotImplementedError def op_blend(self, index): if self.numRegions == 0: self.numRegions = self.private.getNumRegions() numBlends = self.pop() numOps = numBlends * (self.numRegions + 1) if self.blender is None: del self.operandStack[ -(numOps - numBlends) : ] # Leave the default operands on the stack. else: argi = len(self.operandStack) - numOps end_args = tuplei = argi + numBlends while argi < end_args: next_ti = tuplei + self.numRegions deltas = self.operandStack[tuplei:next_ti] delta = self.blender(self.vsIndex, deltas) self.operandStack[argi] += delta tuplei = next_ti argi += 1 self.operandStack[end_args:] = [] def op_vsindex(self, index): vi = self.pop() self.vsIndex = vi self.numRegions = self.private.getNumRegions(vi) t1Operators = [ # opcode name (1, "hstem"), (3, "vstem"), (4, "vmoveto"), (5, "rlineto"), (6, "hlineto"), (7, "vlineto"), (8, "rrcurveto"), (9, "closepath"), (10, "callsubr"), (11, "return"), (13, "hsbw"), (14, "endchar"), (21, "rmoveto"), (22, "hmoveto"), (30, "vhcurveto"), (31, "hvcurveto"), ((12, 0), "dotsection"), ((12, 1), "vstem3"), ((12, 2), "hstem3"), ((12, 6), "seac"), ((12, 7), "sbw"), ((12, 12), "div"), ((12, 16), "callothersubr"), ((12, 17), "pop"), ((12, 33), "setcurrentpoint"), ] class T2WidthExtractor(SimpleT2Decompiler): def __init__( self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None, blender=None, ): SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private, blender) self.nominalWidthX = nominalWidthX self.defaultWidthX = defaultWidthX def reset(self): SimpleT2Decompiler.reset(self) self.gotWidth = 0 self.width = 0 def popallWidth(self, evenOdd=0): args = self.popall() if not self.gotWidth: if evenOdd ^ (len(args) % 2): # For CFF2 charstrings, this should never happen assert ( self.defaultWidthX is not None ), "CFF2 CharStrings must not have an initial width value" self.width = self.nominalWidthX + args[0] args = args[1:] else: self.width = self.defaultWidthX self.gotWidth = 1 return args def countHints(self): args = self.popallWidth() self.hintCount = self.hintCount + len(args) // 2 def op_rmoveto(self, index): self.popallWidth() def op_hmoveto(self, index): self.popallWidth(1) def op_vmoveto(self, index): self.popallWidth(1) def op_endchar(self, index): self.popallWidth() class T2OutlineExtractor(T2WidthExtractor): def __init__( self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None, blender=None, ): T2WidthExtractor.__init__( self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private, blender, ) self.pen = pen self.subrLevel = 0 def reset(self): T2WidthExtractor.reset(self) self.currentPoint = (0, 0) self.sawMoveTo = 0 self.subrLevel = 0 def execute(self, charString): self.subrLevel += 1 super().execute(charString) self.subrLevel -= 1 if self.subrLevel == 0: self.endPath() def _nextPoint(self, point): x, y = self.currentPoint point = x + point[0], y + point[1] self.currentPoint = point return point def rMoveTo(self, point): self.pen.moveTo(self._nextPoint(point)) self.sawMoveTo = 1 def rLineTo(self, point): if not self.sawMoveTo: self.rMoveTo((0, 0)) self.pen.lineTo(self._nextPoint(point)) def rCurveTo(self, pt1, pt2, pt3): if not self.sawMoveTo: self.rMoveTo((0, 
0)) nextPoint = self._nextPoint self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3)) def closePath(self): if self.sawMoveTo: self.pen.closePath() self.sawMoveTo = 0 def endPath(self): # In T2 there are no open paths, so always do a closePath when # finishing a sub path. We avoid spurious calls to closePath() # because its a real T1 op we're emulating in T2 whereas # endPath() is just a means to that emulation if self.sawMoveTo: self.closePath() # # hint operators # # def op_hstem(self, index): # self.countHints() # def op_vstem(self, index): # self.countHints() # def op_hstemhm(self, index): # self.countHints() # def op_vstemhm(self, index): # self.countHints() # def op_hintmask(self, index): # self.countHints() # def op_cntrmask(self, index): # self.countHints() # # path constructors, moveto # def op_rmoveto(self, index): self.endPath() self.rMoveTo(self.popallWidth()) def op_hmoveto(self, index): self.endPath() self.rMoveTo((self.popallWidth(1)[0], 0)) def op_vmoveto(self, index): self.endPath() self.rMoveTo((0, self.popallWidth(1)[0])) def op_endchar(self, index): self.endPath() args = self.popallWidth() if args: from fontTools.encodings.StandardEncoding import StandardEncoding # endchar can do seac accent bulding; The T2 spec says it's deprecated, # but recent software that shall remain nameless does output it. adx, ady, bchar, achar = args baseGlyph = StandardEncoding[bchar] self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) accentGlyph = StandardEncoding[achar] self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) # # path constructors, lines # def op_rlineto(self, index): args = self.popall() for i in range(0, len(args), 2): point = args[i : i + 2] self.rLineTo(point) def op_hlineto(self, index): self.alternatingLineto(1) def op_vlineto(self, index): self.alternatingLineto(0) # # path constructors, curves # def op_rrcurveto(self, index): """{dxa dya dxb dyb dxc dyc}+ rrcurveto""" args = self.popall() for i in range(0, len(args), 6): ( dxa, dya, dxb, dyb, dxc, dyc, ) = args[i : i + 6] self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc)) def op_rcurveline(self, index): """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline""" args = self.popall() for i in range(0, len(args) - 2, 6): dxb, dyb, dxc, dyc, dxd, dyd = args[i : i + 6] self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) self.rLineTo(args[-2:]) def op_rlinecurve(self, index): """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve""" args = self.popall() lineArgs = args[:-6] for i in range(0, len(lineArgs), 2): self.rLineTo(lineArgs[i : i + 2]) dxb, dyb, dxc, dyc, dxd, dyd = args[-6:] self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) def op_vvcurveto(self, index): "dx1? {dya dxb dyb dyc}+ vvcurveto" args = self.popall() if len(args) % 2: dx1 = args[0] args = args[1:] else: dx1 = 0 for i in range(0, len(args), 4): dya, dxb, dyb, dyc = args[i : i + 4] self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc)) dx1 = 0 def op_hhcurveto(self, index): """dy1? {dxa dxb dyb dxc}+ hhcurveto""" args = self.popall() if len(args) % 2: dy1 = args[0] args = args[1:] else: dy1 = 0 for i in range(0, len(args), 4): dxa, dxb, dyb, dxc = args[i : i + 4] self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0)) dy1 = 0 def op_vhcurveto(self, index): """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30) {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto """ args = self.popall() while args: args = self.vcurveto(args) if args: args = self.hcurveto(args) def op_hvcurveto(self, index): """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf? 
{dxa dxb dyb dyc dyd dxe dye dxf}+ dyf? """ args = self.popall() while args: args = self.hcurveto(args) if args: args = self.vcurveto(args) # # path constructors, flex # def op_hflex(self, index): dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall() dy1 = dy3 = dy4 = dy6 = 0 dy5 = -dy2 self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) def op_flex(self, index): dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall() self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) def op_hflex1(self, index): dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall() dy3 = dy4 = 0 dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5) self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) def op_flex1(self, index): dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall() dx = dx1 + dx2 + dx3 + dx4 + dx5 dy = dy1 + dy2 + dy3 + dy4 + dy5 if abs(dx) > abs(dy): dx6 = d6 dy6 = -dy else: dx6 = -dx dy6 = d6 self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) # misc def op_and(self, index): raise NotImplementedError def op_or(self, index): raise NotImplementedError def op_not(self, index): raise NotImplementedError def op_store(self, index): raise NotImplementedError def op_abs(self, index): raise NotImplementedError def op_add(self, index): raise NotImplementedError def op_sub(self, index): raise NotImplementedError def op_div(self, index): num2 = self.pop() num1 = self.pop() d1 = num1 // num2 d2 = num1 / num2 if d1 == d2: self.push(d1) else: self.push(d2) def op_load(self, index): raise NotImplementedError def op_neg(self, index): raise NotImplementedError def op_eq(self, index): raise NotImplementedError def op_drop(self, index): raise NotImplementedError def op_put(self, index): raise NotImplementedError def op_get(self, index): raise NotImplementedError def op_ifelse(self, index): raise NotImplementedError def op_random(self, index): raise NotImplementedError def op_mul(self, index): raise NotImplementedError def op_sqrt(self, index): raise NotImplementedError def op_dup(self, index): raise NotImplementedError def op_exch(self, index): raise NotImplementedError def op_index(self, index): raise NotImplementedError def op_roll(self, index): raise NotImplementedError # # miscellaneous helpers # def alternatingLineto(self, isHorizontal): args = self.popall() for arg in args: if isHorizontal: point = (arg, 0) else: point = (0, arg) self.rLineTo(point) isHorizontal = not isHorizontal def vcurveto(self, args): dya, dxb, dyb, dxc = args[:4] args = args[4:] if len(args) == 1: dyc = args[0] args = [] else: dyc = 0 self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc)) return args def hcurveto(self, args): dxa, dxb, dyb, dyc = args[:4] args = args[4:] if len(args) == 1: dxc = args[0] args = [] else: dxc = 0 self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc)) return args class T1OutlineExtractor(T2OutlineExtractor): def __init__(self, pen, subrs): self.pen = pen self.subrs = subrs self.reset() def reset(self): self.flexing = 0 self.width = 0 self.sbx = 0 T2OutlineExtractor.reset(self) def endPath(self): if self.sawMoveTo: self.pen.endPath() self.sawMoveTo = 0 def popallWidth(self, evenOdd=0): return self.popall() def exch(self): stack = self.operandStack stack[-1], stack[-2] = stack[-2], stack[-1] # # path constructors # def op_rmoveto(self, index): if self.flexing: return self.endPath() self.rMoveTo(self.popall()) def 
op_hmoveto(self, index): if self.flexing: # We must add a parameter to the stack if we are flexing self.push(0) return self.endPath() self.rMoveTo((self.popall()[0], 0)) def op_vmoveto(self, index): if self.flexing: # We must add a parameter to the stack if we are flexing self.push(0) self.exch() return self.endPath() self.rMoveTo((0, self.popall()[0])) def op_closepath(self, index): self.closePath() def op_setcurrentpoint(self, index): args = self.popall() x, y = args self.currentPoint = x, y def op_endchar(self, index): self.endPath() def op_hsbw(self, index): sbx, wx = self.popall() self.width = wx self.sbx = sbx self.currentPoint = sbx, self.currentPoint[1] def op_sbw(self, index): self.popall() # XXX # def op_callsubr(self, index): subrIndex = self.pop() subr = self.subrs[subrIndex] self.execute(subr) def op_callothersubr(self, index): subrIndex = self.pop() nArgs = self.pop() # print nArgs, subrIndex, "callothersubr" if subrIndex == 0 and nArgs == 3: self.doFlex() self.flexing = 0 elif subrIndex == 1 and nArgs == 0: self.flexing = 1 # ignore... def op_pop(self, index): pass # ignore... def doFlex(self): finaly = self.pop() finalx = self.pop() self.pop() # flex height is unused p3y = self.pop() p3x = self.pop() bcp4y = self.pop() bcp4x = self.pop() bcp3y = self.pop() bcp3x = self.pop() p2y = self.pop() p2x = self.pop() bcp2y = self.pop() bcp2x = self.pop() bcp1y = self.pop() bcp1x = self.pop() rpy = self.pop() rpx = self.pop() # call rrcurveto self.push(bcp1x + rpx) self.push(bcp1y + rpy) self.push(bcp2x) self.push(bcp2y) self.push(p2x) self.push(p2y) self.op_rrcurveto(None) # call rrcurveto self.push(bcp3x) self.push(bcp3y) self.push(bcp4x) self.push(bcp4y) self.push(p3x) self.push(p3y) self.op_rrcurveto(None) # Push back final coords so subr 0 can find them self.push(finalx) self.push(finaly) def op_dotsection(self, index): self.popall() # XXX def op_hstem3(self, index): self.popall() # XXX def op_seac(self, index): "asb adx ady bchar achar seac" from fontTools.encodings.StandardEncoding import StandardEncoding asb, adx, ady, bchar, achar = self.popall() baseGlyph = StandardEncoding[bchar] self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) accentGlyph = StandardEncoding[achar] adx = adx + self.sbx - asb # seac weirdness self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) def op_vstem3(self, index): self.popall() # XXX class T2CharString(object): operandEncoding = t2OperandEncoding operators, opcodes = buildOperatorDict(t2Operators) decompilerClass = SimpleT2Decompiler outlineExtractor = T2OutlineExtractor def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): if program is None: program = [] self.bytecode = bytecode self.program = program self.private = private self.globalSubrs = globalSubrs if globalSubrs is not None else [] self._cur_vsindex = None def getNumRegions(self, vsindex=None): pd = self.private assert pd is not None if vsindex is not None: self._cur_vsindex = vsindex elif self._cur_vsindex is None: self._cur_vsindex = pd.vsindex if hasattr(pd, "vsindex") else 0 return pd.getNumRegions(self._cur_vsindex) def __repr__(self): if self.bytecode is None: return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) else: return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) def getIntEncoder(self): return encodeIntT2 def getFixedEncoder(self): return encodeFixed def decompile(self): if not self.needsDecompilation(): return subrs = getattr(self.private, "Subrs", []) decompiler = self.decompilerClass(subrs, self.globalSubrs, 
self.private) decompiler.execute(self) def draw(self, pen, blender=None): subrs = getattr(self.private, "Subrs", []) extractor = self.outlineExtractor( pen, subrs, self.globalSubrs, self.private.nominalWidthX, self.private.defaultWidthX, self.private, blender, ) extractor.execute(self) self.width = extractor.width def calcBounds(self, glyphSet): boundsPen = BoundsPen(glyphSet) self.draw(boundsPen) return boundsPen.bounds def compile(self, isCFF2=False): if self.bytecode is not None: return opcodes = self.opcodes program = self.program if isCFF2: # If present, remove return and endchar operators. if program and program[-1] in ("return", "endchar"): program = program[:-1] elif program and not isinstance(program[-1], str): raise CharStringCompileError( "T2CharString or Subr has items on the stack after last operator." ) bytecode = [] encodeInt = self.getIntEncoder() encodeFixed = self.getFixedEncoder() i = 0 end = len(program) while i < end: token = program[i] i = i + 1 if isinstance(token, str): try: bytecode.extend(bytechr(b) for b in opcodes[token]) except KeyError: raise CharStringCompileError("illegal operator: %s" % token) if token in ("hintmask", "cntrmask"): bytecode.append(program[i]) # hint mask i = i + 1 elif isinstance(token, int): bytecode.append(encodeInt(token)) elif isinstance(token, float): bytecode.append(encodeFixed(token)) else: assert 0, "unsupported type: %s" % type(token) try: bytecode = bytesjoin(bytecode) except TypeError: log.error(bytecode) raise self.setBytecode(bytecode) def needsDecompilation(self): return self.bytecode is not None def setProgram(self, program): self.program = program self.bytecode = None def setBytecode(self, bytecode): self.bytecode = bytecode self.program = None def getToken(self, index, len=len, byteord=byteord, isinstance=isinstance): if self.bytecode is not None: if index >= len(self.bytecode): return None, 0, 0 b0 = byteord(self.bytecode[index]) index = index + 1 handler = self.operandEncoding[b0] token, index = handler(self, b0, self.bytecode, index) else: if index >= len(self.program): return None, 0, 0 token = self.program[index] index = index + 1 isOperator = isinstance(token, str) return token, isOperator, index def getBytes(self, index, nBytes): if self.bytecode is not None: newIndex = index + nBytes bytes = self.bytecode[index:newIndex] index = newIndex else: bytes = self.program[index] index = index + 1 assert len(bytes) == nBytes return bytes, index def handle_operator(self, operator): return operator def toXML(self, xmlWriter, ttFont=None): from fontTools.misc.textTools import num2binary if self.bytecode is not None: xmlWriter.dumphex(self.bytecode) else: index = 0 args = [] while True: token, isOperator, index = self.getToken(index) if token is None: break if isOperator: if token in ("hintmask", "cntrmask"): hintMask, isOperator, index = self.getToken(index) bits = [] for byte in hintMask: bits.append(num2binary(byteord(byte), 8)) hintMask = strjoin(bits) line = " ".join(args + [token, hintMask]) else: line = " ".join(args + [token]) xmlWriter.write(line) xmlWriter.newline() args = [] else: if isinstance(token, float): token = floatToFixedToStr(token, precisionBits=16) else: token = str(token) args.append(token) if args: # NOTE: only CFF2 charstrings/subrs can have numeric arguments on # the stack after the last operator. Compiling this would fail if # this is part of CFF 1.0 table. 
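            # Emit the leftover operands as one final bare line of numbers;
            # fromXML below parses such tokens back into the program.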
line = " ".join(args) xmlWriter.write(line) def fromXML(self, name, attrs, content): from fontTools.misc.textTools import binary2num, readHex if attrs.get("raw"): self.setBytecode(readHex(content)) return content = strjoin(content) content = content.split() program = [] end = len(content) i = 0 while i < end: token = content[i] i = i + 1 try: token = int(token) except ValueError: try: token = strToFixedToFloat(token, precisionBits=16) except ValueError: program.append(token) if token in ("hintmask", "cntrmask"): mask = content[i] maskBytes = b"" for j in range(0, len(mask), 8): maskBytes = maskBytes + bytechr(binary2num(mask[j : j + 8])) program.append(maskBytes) i = i + 1 else: program.append(token) else: program.append(token) self.setProgram(program) class T1CharString(T2CharString): operandEncoding = t1OperandEncoding operators, opcodes = buildOperatorDict(t1Operators) def __init__(self, bytecode=None, program=None, subrs=None): super().__init__(bytecode, program) self.subrs = subrs def getIntEncoder(self): return encodeIntT1 def getFixedEncoder(self): def encodeFixed(value): raise TypeError("Type 1 charstrings don't support floating point operands") def decompile(self): if self.bytecode is None: return program = [] index = 0 while True: token, isOperator, index = self.getToken(index) if token is None: break program.append(token) self.setProgram(program) def draw(self, pen): extractor = T1OutlineExtractor(pen, self.subrs) extractor.execute(self) self.width = extractor.width class DictDecompiler(object): operandEncoding = cffDictOperandEncoding def __init__(self, strings, parent=None): self.stack = [] self.strings = strings self.dict = {} self.parent = parent def getDict(self): assert len(self.stack) == 0, "non-empty stack" return self.dict def decompile(self, data): index = 0 lenData = len(data) push = self.stack.append while index < lenData: b0 = byteord(data[index]) index = index + 1 handler = self.operandEncoding[b0] value, index = handler(self, b0, data, index) if value is not None: push(value) def pop(self): value = self.stack[-1] del self.stack[-1] return value def popall(self): args = self.stack[:] del self.stack[:] return args def handle_operator(self, operator): operator, argType = operator if isinstance(argType, tuple): value = () for i in range(len(argType) - 1, -1, -1): arg = argType[i] arghandler = getattr(self, "arg_" + arg) value = (arghandler(operator),) + value else: arghandler = getattr(self, "arg_" + argType) value = arghandler(operator) if operator == "blend": self.stack.extend(value) else: self.dict[operator] = value def arg_number(self, name): if isinstance(self.stack[0], list): out = self.arg_blend_number(self.stack) else: out = self.pop() return out def arg_blend_number(self, name): out = [] blendArgs = self.pop() numMasters = len(blendArgs) out.append(blendArgs) out.append("blend") dummy = self.popall() return blendArgs def arg_SID(self, name): return self.strings[self.pop()] def arg_array(self, name): return self.popall() def arg_blendList(self, name): """ There may be non-blend args at the top of the stack. We first calculate where the blend args start in the stack. These are the last numMasters*numBlends) +1 args. The blend args starts with numMasters relative coordinate values, the BlueValues in the list from the default master font. This is followed by numBlends list of values. Each of value in one of these lists is the Variable Font delta for the matching region. We re-arrange this to be a list of numMaster entries. 
Each entry starts with the corresponding default font relative value, and is followed by the delta values. We then convert the default values, the first item in each entry, to an absolute value. """ vsindex = self.dict.get("vsindex", 0) numMasters = ( self.parent.getNumRegions(vsindex) + 1 ) # only a PrivateDict has blended ops. numBlends = self.pop() args = self.popall() numArgs = len(args) # The spec says that there should be no non-blended Blue Values,. assert numArgs == numMasters * numBlends value = [None] * numBlends numDeltas = numMasters - 1 i = 0 prevVal = 0 while i < numBlends: newVal = args[i] + prevVal prevVal = newVal masterOffset = numBlends + (i * numDeltas) blendList = [newVal] + args[masterOffset : masterOffset + numDeltas] value[i] = blendList i += 1 return value def arg_delta(self, name): valueList = self.popall() out = [] if valueList and isinstance(valueList[0], list): # arg_blendList() has already converted these to absolute values. out = valueList else: current = 0 for v in valueList: current = current + v out.append(current) return out def calcSubrBias(subrs): nSubrs = len(subrs) if nSubrs < 1240: bias = 107 elif nSubrs < 33900: bias = 1131 else: bias = 32768 return bias PKaZZZ[~$�C/C/fontTools/misc/psLib.pyfrom fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr from fontTools.misc import eexec from .psOperators import ( PSOperators, ps_StandardEncoding, ps_array, ps_boolean, ps_dict, ps_integer, ps_literal, ps_mark, ps_name, ps_operator, ps_procedure, ps_procmark, ps_real, ps_string, ) import re from collections.abc import Callable from string import whitespace import logging log = logging.getLogger(__name__) ps_special = b"()<>[]{}%" # / is one too, but we take care of that one differently skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"])) endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"]) endofthingRE = re.compile(endofthingPat) commentRE = re.compile(b"%[^\n\r]*") # XXX This not entirely correct as it doesn't allow *nested* embedded parens: stringPat = rb""" \( ( ( [^()]* \ [()] ) | ( [^()]* \( [^()]* \) ) )* [^()]* \) """ stringPat = b"".join(stringPat.split()) stringRE = re.compile(stringPat) hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"])) class PSTokenError(Exception): pass class PSError(Exception): pass class PSTokenizer(object): def __init__(self, buf=b"", encoding="ascii"): # Force self.buf to be a byte string buf = tobytes(buf) self.buf = buf self.len = len(buf) self.pos = 0 self.closed = False self.encoding = encoding def read(self, n=-1): """Read at most 'n' bytes from the buffer, or less if the read hits EOF before obtaining 'n' bytes. If 'n' is negative or omitted, read all data until EOF is reached. 
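For example, reading from a small in-memory buffer (a minimal
sketch; PSTokenizer normally consumes Type 1 font data):

>>> t = PSTokenizer(b"abcdef")
>>> t.read(3)
b'abc'
>>> t.read()
b'def'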
""" if self.closed: raise ValueError("I/O operation on closed file") if n is None or n < 0: newpos = self.len else: newpos = min(self.pos + n, self.len) r = self.buf[self.pos : newpos] self.pos = newpos return r def close(self): if not self.closed: self.closed = True del self.buf, self.pos def getnexttoken( self, # localize some stuff, for performance len=len, ps_special=ps_special, stringmatch=stringRE.match, hexstringmatch=hexstringRE.match, commentmatch=commentRE.match, endmatch=endofthingRE.match, ): self.skipwhite() if self.pos >= self.len: return None, None pos = self.pos buf = self.buf char = bytechr(byteord(buf[pos])) if char in ps_special: if char in b"{}[]": tokentype = "do_special" token = char elif char == b"%": tokentype = "do_comment" _, nextpos = commentmatch(buf, pos).span() token = buf[pos:nextpos] elif char == b"(": tokentype = "do_string" m = stringmatch(buf, pos) if m is None: raise PSTokenError("bad string at character %d" % pos) _, nextpos = m.span() token = buf[pos:nextpos] elif char == b"<": tokentype = "do_hexstring" m = hexstringmatch(buf, pos) if m is None: raise PSTokenError("bad hexstring at character %d" % pos) _, nextpos = m.span() token = buf[pos:nextpos] else: raise PSTokenError("bad token at character %d" % pos) else: if char == b"/": tokentype = "do_literal" m = endmatch(buf, pos + 1) else: tokentype = "" m = endmatch(buf, pos) if m is None: raise PSTokenError("bad token at character %d" % pos) _, nextpos = m.span() token = buf[pos:nextpos] self.pos = pos + len(token) token = tostr(token, encoding=self.encoding) return tokentype, token def skipwhite(self, whitematch=skipwhiteRE.match): _, nextpos = whitematch(self.buf, self.pos).span() self.pos = nextpos def starteexec(self): self.pos = self.pos + 1 self.dirtybuf = self.buf[self.pos :] self.buf, R = eexec.decrypt(self.dirtybuf, 55665) self.len = len(self.buf) self.pos = 4 def stopeexec(self): if not hasattr(self, "dirtybuf"): return self.buf = self.dirtybuf del self.dirtybuf class PSInterpreter(PSOperators): def __init__(self, encoding="ascii"): systemdict = {} userdict = {} self.encoding = encoding self.dictstack = [systemdict, userdict] self.stack = [] self.proclevel = 0 self.procmark = ps_procmark() self.fillsystemdict() def fillsystemdict(self): systemdict = self.dictstack[0] systemdict["["] = systemdict["mark"] = self.mark = ps_mark() systemdict["]"] = ps_operator("]", self.do_makearray) systemdict["true"] = ps_boolean(1) systemdict["false"] = ps_boolean(0) systemdict["StandardEncoding"] = ps_array(ps_StandardEncoding) systemdict["FontDirectory"] = ps_dict({}) self.suckoperators(systemdict, self.__class__) def suckoperators(self, systemdict, klass): for name in dir(klass): attr = getattr(self, name) if isinstance(attr, Callable) and name[:3] == "ps_": name = name[3:] systemdict[name] = ps_operator(name, attr) for baseclass in klass.__bases__: self.suckoperators(systemdict, baseclass) def interpret(self, data, getattr=getattr): tokenizer = self.tokenizer = PSTokenizer(data, self.encoding) getnexttoken = tokenizer.getnexttoken do_token = self.do_token handle_object = self.handle_object try: while 1: tokentype, token = getnexttoken() if not token: break if tokentype: handler = getattr(self, tokentype) object = handler(token) else: object = do_token(token) if object is not None: handle_object(object) tokenizer.close() self.tokenizer = None except: if self.tokenizer is not None: log.debug( "ps error:\n" "- - - - - - -\n" "%s\n" ">>>\n" "%s\n" "- - - - - - -", self.tokenizer.buf[self.tokenizer.pos - 50 : 
self.tokenizer.pos], self.tokenizer.buf[self.tokenizer.pos : self.tokenizer.pos + 50], ) raise def handle_object(self, object): if not (self.proclevel or object.literal or object.type == "proceduretype"): if object.type != "operatortype": object = self.resolve_name(object.value) if object.literal: self.push(object) else: if object.type == "proceduretype": self.call_procedure(object) else: object.function() else: self.push(object) def call_procedure(self, proc): handle_object = self.handle_object for item in proc.value: handle_object(item) def resolve_name(self, name): dictstack = self.dictstack for i in range(len(dictstack) - 1, -1, -1): if name in dictstack[i]: return dictstack[i][name] raise PSError("name error: " + str(name)) def do_token( self, token, int=int, float=float, ps_name=ps_name, ps_integer=ps_integer, ps_real=ps_real, ): try: num = int(token) except (ValueError, OverflowError): try: num = float(token) except (ValueError, OverflowError): if "#" in token: hashpos = token.find("#") try: base = int(token[:hashpos]) num = int(token[hashpos + 1 :], base) except (ValueError, OverflowError): return ps_name(token) else: return ps_integer(num) else: return ps_name(token) else: return ps_real(num) else: return ps_integer(num) def do_comment(self, token): pass def do_literal(self, token): return ps_literal(token[1:]) def do_string(self, token): return ps_string(token[1:-1]) def do_hexstring(self, token): hexStr = "".join(token[1:-1].split()) if len(hexStr) % 2: hexStr = hexStr + "0" cleanstr = [] for i in range(0, len(hexStr), 2): cleanstr.append(chr(int(hexStr[i : i + 2], 16))) cleanstr = "".join(cleanstr) return ps_string(cleanstr) def do_special(self, token): if token == "{": self.proclevel = self.proclevel + 1 return self.procmark elif token == "}": proc = [] while 1: topobject = self.pop() if topobject == self.procmark: break proc.append(topobject) self.proclevel = self.proclevel - 1 proc.reverse() return ps_procedure(proc) elif token == "[": return self.mark elif token == "]": return ps_name("]") else: raise PSTokenError("huh?") def push(self, object): self.stack.append(object) def pop(self, *types): stack = self.stack if not stack: raise PSError("stack underflow") object = stack[-1] if types: if object.type not in types: raise PSError( "typecheck, expected %s, found %s" % (repr(types), object.type) ) del stack[-1] return object def do_makearray(self): array = [] while 1: topobject = self.pop() if topobject == self.mark: break array.append(topobject) array.reverse() self.push(ps_array(array)) def close(self): """Remove circular references.""" del self.stack del self.dictstack def unpack_item(item): tp = type(item.value) if tp == dict: newitem = {} for key, value in item.value.items(): newitem[key] = unpack_item(value) elif tp == list: newitem = [None] * len(item.value) for i in range(len(item.value)): newitem[i] = unpack_item(item.value[i]) if item.type == "proceduretype": newitem = tuple(newitem) else: newitem = item.value return newitem def suckfont(data, encoding="ascii"): m = re.search(rb"/FontName\s+/([^ \t\n\r]+)\s+def", data) if m: fontName = m.group(1) fontName = fontName.decode() else: fontName = None interpreter = PSInterpreter(encoding=encoding) interpreter.interpret( b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop" ) interpreter.interpret(data) fontdir = interpreter.dictstack[0]["FontDirectory"].value if fontName in fontdir: rawfont = fontdir[fontName] else: # fall back, in case fontName wasn't found fontNames = list(fontdir.keys()) if 
len(fontNames) > 1: fontNames.remove("Helvetica") fontNames.sort() rawfont = fontdir[fontNames[0]] interpreter.close() return unpack_item(rawfont) PKaZZZҪ��T=T=fontTools/misc/psOperators.py_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"} class ps_object(object): literal = 1 access = 0 value = None def __init__(self, value): self.value = value self.type = self.__class__.__name__[3:] + "type" def __repr__(self): return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value)) class ps_operator(ps_object): literal = 0 def __init__(self, name, function): self.name = name self.function = function self.type = self.__class__.__name__[3:] + "type" def __repr__(self): return "<operator %s>" % self.name class ps_procedure(ps_object): literal = 0 def __repr__(self): return "<procedure>" def __str__(self): psstring = "{" for i in range(len(self.value)): if i: psstring = psstring + " " + str(self.value[i]) else: psstring = psstring + str(self.value[i]) return psstring + "}" class ps_name(ps_object): literal = 0 def __str__(self): if self.literal: return "/" + self.value else: return self.value class ps_literal(ps_object): def __str__(self): return "/" + self.value class ps_array(ps_object): def __str__(self): psstring = "[" for i in range(len(self.value)): item = self.value[i] access = _accessstrings[item.access] if access: access = " " + access if i: psstring = psstring + " " + str(item) + access else: psstring = psstring + str(item) + access return psstring + "]" def __repr__(self): return "<array>" _type1_pre_eexec_order = [ "FontInfo", "FontName", "Encoding", "PaintType", "FontType", "FontMatrix", "FontBBox", "UniqueID", "Metrics", "StrokeWidth", ] _type1_fontinfo_order = [ "version", "Notice", "FullName", "FamilyName", "Weight", "ItalicAngle", "isFixedPitch", "UnderlinePosition", "UnderlineThickness", ] _type1_post_eexec_order = ["Private", "CharStrings", "FID"] def _type1_item_repr(key, value): psstring = "" access = _accessstrings[value.access] if access: access = access + " " if key == "CharStrings": psstring = psstring + "/%s %s def\n" % ( key, _type1_CharString_repr(value.value), ) elif key == "Encoding": psstring = psstring + _type1_Encoding_repr(value, access) else: psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) return psstring def _type1_Encoding_repr(encoding, access): encoding = encoding.value psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n" for i in range(256): name = encoding[i].value if name != ".notdef": psstring = psstring + "dup %d /%s put\n" % (i, name) return psstring + access + "def\n" def _type1_CharString_repr(charstrings): items = sorted(charstrings.items()) return "xxx" class ps_font(ps_object): def __str__(self): psstring = "%d dict dup begin\n" % len(self.value) for key in _type1_pre_eexec_order: try: value = self.value[key] except KeyError: pass else: psstring = psstring + _type1_item_repr(key, value) items = sorted(self.value.items()) for key, value in items: if key not in _type1_pre_eexec_order + _type1_post_eexec_order: psstring = psstring + _type1_item_repr(key, value) psstring = psstring + "currentdict end\ncurrentfile eexec\ndup " for key in _type1_post_eexec_order: try: value = self.value[key] except KeyError: pass else: psstring = psstring + _type1_item_repr(key, value) return ( psstring + "dup/FontName get exch definefont pop\nmark currentfile closefile\n" + 8 * (64 * "0" + "\n") + "cleartomark" + "\n" ) def __repr__(self): return "<font>" class ps_file(ps_object): pass class 
ps_dict(ps_object): def __str__(self): psstring = "%d dict dup begin\n" % len(self.value) items = sorted(self.value.items()) for key, value in items: access = _accessstrings[value.access] if access: access = access + " " psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) return psstring + "end " def __repr__(self): return "<dict>" class ps_mark(ps_object): def __init__(self): self.value = "mark" self.type = self.__class__.__name__[3:] + "type" class ps_procmark(ps_object): def __init__(self): self.value = "procmark" self.type = self.__class__.__name__[3:] + "type" class ps_null(ps_object): def __init__(self): self.type = self.__class__.__name__[3:] + "type" class ps_boolean(ps_object): def __str__(self): if self.value: return "true" else: return "false" class ps_string(ps_object): def __str__(self): return "(%s)" % repr(self.value)[1:-1] class ps_integer(ps_object): def __str__(self): return repr(self.value) class ps_real(ps_object): def __str__(self): return repr(self.value) class PSOperators(object): def ps_def(self): obj = self.pop() name = self.pop() self.dictstack[-1][name.value] = obj def ps_bind(self): proc = self.pop("proceduretype") self.proc_bind(proc) self.push(proc) def proc_bind(self, proc): for i in range(len(proc.value)): item = proc.value[i] if item.type == "proceduretype": self.proc_bind(item) else: if not item.literal: try: obj = self.resolve_name(item.value) except: pass else: if obj.type == "operatortype": proc.value[i] = obj def ps_exch(self): if len(self.stack) < 2: raise RuntimeError("stack underflow") obj1 = self.pop() obj2 = self.pop() self.push(obj1) self.push(obj2) def ps_dup(self): if not self.stack: raise RuntimeError("stack underflow") self.push(self.stack[-1]) def ps_exec(self): obj = self.pop() if obj.type == "proceduretype": self.call_procedure(obj) else: self.handle_object(obj) def ps_count(self): self.push(ps_integer(len(self.stack))) def ps_eq(self): any1 = self.pop() any2 = self.pop() self.push(ps_boolean(any1.value == any2.value)) def ps_ne(self): any1 = self.pop() any2 = self.pop() self.push(ps_boolean(any1.value != any2.value)) def ps_cvx(self): obj = self.pop() obj.literal = 0 self.push(obj) def ps_matrix(self): matrix = [ ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0), ] self.push(ps_array(matrix)) def ps_string(self): num = self.pop("integertype").value self.push(ps_string("\0" * num)) def ps_type(self): obj = self.pop() self.push(ps_string(obj.type)) def ps_store(self): value = self.pop() key = self.pop() name = key.value for i in range(len(self.dictstack) - 1, -1, -1): if name in self.dictstack[i]: self.dictstack[i][name] = value break self.dictstack[-1][name] = value def ps_where(self): name = self.pop() # XXX self.push(ps_boolean(0)) def ps_systemdict(self): self.push(ps_dict(self.dictstack[0])) def ps_userdict(self): self.push(ps_dict(self.dictstack[1])) def ps_currentdict(self): self.push(ps_dict(self.dictstack[-1])) def ps_currentfile(self): self.push(ps_file(self.tokenizer)) def ps_eexec(self): f = self.pop("filetype").value f.starteexec() def ps_closefile(self): f = self.pop("filetype").value f.skipwhite() f.stopeexec() def ps_cleartomark(self): obj = self.pop() while obj != self.mark: obj = self.pop() def ps_readstring(self, ps_boolean=ps_boolean, len=len): s = self.pop("stringtype") oldstr = s.value f = self.pop("filetype") # pad = file.value.read(1) # for StringIO, this is faster f.value.pos = f.value.pos + 1 newstr = f.value.read(len(oldstr)) s.value = newstr self.push(s) 
self.push(ps_boolean(len(oldstr) == len(newstr))) def ps_known(self): key = self.pop() d = self.pop("dicttype", "fonttype") self.push(ps_boolean(key.value in d.value)) def ps_if(self): proc = self.pop("proceduretype") if self.pop("booleantype").value: self.call_procedure(proc) def ps_ifelse(self): proc2 = self.pop("proceduretype") proc1 = self.pop("proceduretype") if self.pop("booleantype").value: self.call_procedure(proc1) else: self.call_procedure(proc2) def ps_readonly(self): obj = self.pop() if obj.access < 1: obj.access = 1 self.push(obj) def ps_executeonly(self): obj = self.pop() if obj.access < 2: obj.access = 2 self.push(obj) def ps_noaccess(self): obj = self.pop() if obj.access < 3: obj.access = 3 self.push(obj) def ps_not(self): obj = self.pop("booleantype", "integertype") if obj.type == "booleantype": self.push(ps_boolean(not obj.value)) else: self.push(ps_integer(~obj.value)) def ps_print(self): str = self.pop("stringtype") print("PS output --->", str.value) def ps_anchorsearch(self): seek = self.pop("stringtype") s = self.pop("stringtype") seeklen = len(seek.value) if s.value[:seeklen] == seek.value: self.push(ps_string(s.value[seeklen:])) self.push(seek) self.push(ps_boolean(1)) else: self.push(s) self.push(ps_boolean(0)) def ps_array(self): num = self.pop("integertype") array = ps_array([None] * num.value) self.push(array) def ps_astore(self): array = self.pop("arraytype") for i in range(len(array.value) - 1, -1, -1): array.value[i] = self.pop() self.push(array) def ps_load(self): name = self.pop() self.push(self.resolve_name(name.value)) def ps_put(self): obj1 = self.pop() obj2 = self.pop() obj3 = self.pop("arraytype", "dicttype", "stringtype", "proceduretype") tp = obj3.type if tp == "arraytype" or tp == "proceduretype": obj3.value[obj2.value] = obj1 elif tp == "dicttype": obj3.value[obj2.value] = obj1 elif tp == "stringtype": index = obj2.value obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index + 1 :] def ps_get(self): obj1 = self.pop() if obj1.value == "Encoding": pass obj2 = self.pop( "arraytype", "dicttype", "stringtype", "proceduretype", "fonttype" ) tp = obj2.type if tp in ("arraytype", "proceduretype"): self.push(obj2.value[obj1.value]) elif tp in ("dicttype", "fonttype"): self.push(obj2.value[obj1.value]) elif tp == "stringtype": self.push(ps_integer(ord(obj2.value[obj1.value]))) else: assert False, "shouldn't get here" def ps_getinterval(self): obj1 = self.pop("integertype") obj2 = self.pop("integertype") obj3 = self.pop("arraytype", "stringtype") tp = obj3.type if tp == "arraytype": self.push(ps_array(obj3.value[obj2.value : obj2.value + obj1.value])) elif tp == "stringtype": self.push(ps_string(obj3.value[obj2.value : obj2.value + obj1.value])) def ps_putinterval(self): obj1 = self.pop("arraytype", "stringtype") obj2 = self.pop("integertype") obj3 = self.pop("arraytype", "stringtype") tp = obj3.type if tp == "arraytype": obj3.value[obj2.value : obj2.value + len(obj1.value)] = obj1.value elif tp == "stringtype": newstr = obj3.value[: obj2.value] newstr = newstr + obj1.value newstr = newstr + obj3.value[obj2.value + len(obj1.value) :] obj3.value = newstr def ps_cvn(self): self.push(ps_name(self.pop("stringtype").value)) def ps_index(self): n = self.pop("integertype").value if n < 0: raise RuntimeError("index may not be negative") self.push(self.stack[-1 - n]) def ps_for(self): proc = self.pop("proceduretype") limit = self.pop("integertype", "realtype").value increment = self.pop("integertype", "realtype").value i = self.pop("integertype", 
"realtype").value while 1: if increment > 0: if i > limit: break else: if i < limit: break if type(i) == type(0.0): self.push(ps_real(i)) else: self.push(ps_integer(i)) self.call_procedure(proc) i = i + increment def ps_forall(self): proc = self.pop("proceduretype") obj = self.pop("arraytype", "stringtype", "dicttype") tp = obj.type if tp == "arraytype": for item in obj.value: self.push(item) self.call_procedure(proc) elif tp == "stringtype": for item in obj.value: self.push(ps_integer(ord(item))) self.call_procedure(proc) elif tp == "dicttype": for key, value in obj.value.items(): self.push(ps_name(key)) self.push(value) self.call_procedure(proc) def ps_definefont(self): font = self.pop("dicttype") name = self.pop() font = ps_font(font.value) self.dictstack[0]["FontDirectory"].value[name.value] = font self.push(font) def ps_findfont(self): name = self.pop() font = self.dictstack[0]["FontDirectory"].value[name.value] self.push(font) def ps_pop(self): self.pop() def ps_dict(self): self.pop("integertype") self.push(ps_dict({})) def ps_begin(self): self.dictstack.append(self.pop("dicttype").value) def ps_end(self): if len(self.dictstack) > 2: del self.dictstack[-1] else: raise RuntimeError("dictstack underflow") notdef = ".notdef" from fontTools.encodings.StandardEncoding import StandardEncoding ps_StandardEncoding = list(map(ps_name, StandardEncoding)) PKaZZZ���X��fontTools/misc/py23.py"""Python 2/3 compat layer leftovers.""" import decimal as _decimal import math as _math import warnings from contextlib import redirect_stderr, redirect_stdout from io import BytesIO from io import StringIO as UnicodeIO from types import SimpleNamespace from .textTools import Tag, bytechr, byteord, bytesjoin, strjoin, tobytes, tostr warnings.warn( "The py23 module has been deprecated and will be removed in a future release. " "Please update your code.", DeprecationWarning, ) __all__ = [ "basestring", "bytechr", "byteord", "BytesIO", "bytesjoin", "open", "Py23Error", "range", "RecursionError", "round", "SimpleNamespace", "StringIO", "strjoin", "Tag", "tobytes", "tostr", "tounicode", "unichr", "unicode", "UnicodeIO", "xrange", "zip", ] class Py23Error(NotImplementedError): pass RecursionError = RecursionError StringIO = UnicodeIO basestring = str isclose = _math.isclose isfinite = _math.isfinite open = open range = range round = round3 = round unichr = chr unicode = str zip = zip tounicode = tostr def xrange(*args, **kwargs): raise Py23Error("'xrange' is not defined. Use 'range' instead.") def round2(number, ndigits=None): """ Implementation of Python 2 built-in round() function. Rounds a number to a given precision in decimal digits (default 0 digits). The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus ndigits; if two multiples are equally close, rounding is done away from 0. ndigits may be negative. See Python 2 documentation: https://docs.python.org/2/library/functions.html?highlight=round#round """ if ndigits is None: ndigits = 0 if ndigits < 0: exponent = 10 ** (-ndigits) quotient, remainder = divmod(number, exponent) if remainder >= exponent // 2 and number >= 0: quotient += 1 return float(quotient * exponent) else: exponent = _decimal.Decimal("10") ** (-ndigits) d = _decimal.Decimal.from_float(number).quantize( exponent, rounding=_decimal.ROUND_HALF_UP ) return float(d) PKaZZZԬ�e e fontTools/misc/roundTools.py""" Various round-to-integer helpers. 
""" import math import functools import logging log = logging.getLogger(__name__) __all__ = [ "noRound", "otRound", "maybeRound", "roundFunc", "nearestMultipleShortestRepr", ] def noRound(value): return value def otRound(value): """Round float value to nearest integer towards ``+Infinity``. The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_) defines the required method for converting floating point values to fixed-point. In particular it specifies the following rounding strategy: for fractional values of 0.5 and higher, take the next higher integer; for other fractional values, truncate. This function rounds the floating-point value according to this strategy in preparation for conversion to fixed-point. Args: value (float): The input floating-point value. Returns float: The rounded value. """ # See this thread for how we ended up with this implementation: # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166 return int(math.floor(value + 0.5)) def maybeRound(v, tolerance, round=otRound): rounded = round(v) return rounded if abs(rounded - v) <= tolerance else v def roundFunc(tolerance, round=otRound): if tolerance < 0: raise ValueError("Rounding tolerance must be positive") if tolerance == 0: return noRound if tolerance >= 0.5: return round return functools.partial(maybeRound, tolerance=tolerance, round=round) def nearestMultipleShortestRepr(value: float, factor: float) -> str: """Round to nearest multiple of factor and return shortest decimal representation. This chooses the float that is closer to a multiple of the given factor while having the shortest decimal representation (the least number of fractional decimal digits). For example, given the following: >>> nearestMultipleShortestRepr(-0.61883544921875, 1.0/(1<<14)) '-0.61884' Useful when you need to serialize or print a fixed-point number (or multiples thereof, such as F2Dot14 fractions of 180 degrees in COLRv1 PaintRotate) in a human-readable form. Args: value (value): The value to be rounded and serialized. factor (float): The value which the result is a close multiple of. Returns: str: A compact string representation of the value. """ if not value: return "0.0" value = otRound(value / factor) * factor eps = 0.5 * factor lo = value - eps hi = value + eps # If the range of valid choices spans an integer, return the integer. if int(lo) != int(hi): return str(float(round(value))) fmt = "%.8f" lo = fmt % lo hi = fmt % hi assert len(lo) == len(hi) and lo != hi for i in range(len(lo)): if lo[i] != hi[i]: break period = lo.find(".") assert period < i fmt = "%%.%df" % (i - period) return fmt % value PKaZZZís�EEfontTools/misc/sstruct.py"""sstruct.py -- SuperStruct Higher level layer on top of the struct module, enabling to bind names to struct elements. The interface is similar to struct, except the objects passed and returned are not tuples (or argument lists), but dictionaries or instances. Just like struct, we use fmt strings to describe a data structure, except we use one line per element. Lines are separated by newlines or semi-colons. Each line contains either one of the special struct characters ('@', '=', '<', '>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f'). Repetitions, like the struct module offers them are not useful in this context, except for fixed length strings (eg. 'myInt:5h' is not allowed but 'myString:5s' is). 
The 'x' fmt character (pad byte) is treated as 'special', since it is by definition anonymous. Extra whitespace is allowed everywhere. The sstruct module offers one feature that the "normal" struct module doesn't: support for fixed point numbers. These are spelled as "n.mF", where n is the number of bits before the point, and m the number of bits after the point. Fixed point numbers get converted to floats. pack(fmt, object): 'object' is either a dictionary or an instance (or actually anything that has a __dict__ attribute). If it is a dictionary, its keys are used for names. If it is an instance, it's attributes are used to grab struct elements from. Returns a string containing the data. unpack(fmt, data, object=None) If 'object' is omitted (or None), a new dictionary will be returned. If 'object' is a dictionary, it will be used to add struct elements to. If it is an instance (or in fact anything that has a __dict__ attribute), an attribute will be added for each struct element. In the latter two cases, 'object' itself is returned. unpack2(fmt, data, object=None) Convenience function. Same as unpack, except data may be longer than needed. The returned value is a tuple: (object, leftoverdata). calcsize(fmt) like struct.calcsize(), but uses our own fmt strings: it returns the size of the data in bytes. """ from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi from fontTools.misc.textTools import tobytes, tostr import struct import re __version__ = "1.2" __copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>" class Error(Exception): pass def pack(fmt, obj): formatstring, names, fixes = getformat(fmt, keep_pad_byte=True) elements = [] if not isinstance(obj, dict): obj = obj.__dict__ for name in names: value = obj[name] if name in fixes: # fixed point conversion value = fl2fi(value, fixes[name]) elif isinstance(value, str): value = tobytes(value) elements.append(value) data = struct.pack(*(formatstring,) + tuple(elements)) return data def unpack(fmt, data, obj=None): if obj is None: obj = {} data = tobytes(data) formatstring, names, fixes = getformat(fmt) if isinstance(obj, dict): d = obj else: d = obj.__dict__ elements = struct.unpack(formatstring, data) for i in range(len(names)): name = names[i] value = elements[i] if name in fixes: # fixed point conversion value = fi2fl(value, fixes[name]) elif isinstance(value, bytes): try: value = tostr(value) except UnicodeDecodeError: pass d[name] = value return obj def unpack2(fmt, data, obj=None): length = calcsize(fmt) return unpack(fmt, data[:length], obj), data[length:] def calcsize(fmt): formatstring, names, fixes = getformat(fmt) return struct.calcsize(formatstring) # matches "name:formatchar" (whitespace is allowed) _elementRE = re.compile( r"\s*" # whitespace r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier) r"\s*:\s*" # whitespace : whitespace r"([xcbB?hHiIlLqQfd]|" # formatchar... r"[0-9]+[ps]|" # ...formatchar... 
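# (the branch below captures fixed point "n.mF" formats; groups 3
# and 4 hold the bit counts before/after the binary point)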
r"([0-9]+)\.([0-9]+)(F))" # ...formatchar r"\s*" # whitespace r"(#.*)?$" # [comment] + end of string ) # matches the special struct fmt chars and 'x' (pad byte) _extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$") # matches an "empty" string, possibly containing whitespace and/or a comment _emptyRE = re.compile(r"\s*(#.*)?$") _fixedpointmappings = {8: "b", 16: "h", 32: "l"} _formatcache = {} def getformat(fmt, keep_pad_byte=False): fmt = tostr(fmt, encoding="ascii") try: formatstring, names, fixes = _formatcache[fmt] except KeyError: lines = re.split("[\n;]", fmt) formatstring = "" names = [] fixes = {} for line in lines: if _emptyRE.match(line): continue m = _extraRE.match(line) if m: formatchar = m.group(1) if formatchar != "x" and formatstring: raise Error("a special fmt char must be first") else: m = _elementRE.match(line) if not m: raise Error("syntax error in fmt: '%s'" % line) name = m.group(1) formatchar = m.group(2) if keep_pad_byte or formatchar != "x": names.append(name) if m.group(3): # fixed point before = int(m.group(3)) after = int(m.group(4)) bits = before + after if bits not in [8, 16, 32]: raise Error("fixed point must be 8, 16 or 32 bits long") formatchar = _fixedpointmappings[bits] assert m.group(5) == "F" fixes[name] = after formatstring = formatstring + formatchar _formatcache[fmt] = formatstring, names, fixes return formatstring, names, fixes def _test(): fmt = """ # comments are allowed > # big endian (see documentation for struct) # empty lines are allowed: ashort: h along: l abyte: b # a byte achar: c astr: 5s afloat: f; adouble: d # multiple "statements" are allowed afixed: 16.16F abool: ? apad: x """ print("size:", calcsize(fmt)) class foo(object): pass i = foo() i.ashort = 0x7FFF i.along = 0x7FFFFFFF i.abyte = 0x7F i.achar = "a" i.astr = "12345" i.afloat = 0.5 i.adouble = 0.5 i.afixed = 1.5 i.abool = True data = pack(fmt, i) print("data:", repr(data)) print(unpack(fmt, data)) i2 = foo() unpack(fmt, data, i2) print(vars(i2)) if __name__ == "__main__": _test() PKaZZZ ����fontTools/misc/symfont.pyfrom fontTools.pens.basePen import BasePen from functools import partial from itertools import count import sympy as sp import sys n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic t, x, y = sp.symbols("t x y", real=True) c = sp.symbols("c", real=False) # Complex representation instead of x/y X = tuple(sp.symbols("x:%d" % (n + 1), real=True)) Y = tuple(sp.symbols("y:%d" % (n + 1), real=True)) P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01"))) C = tuple(sp.symbols("c:%d" % (n + 1), real=False)) # Cubic Bernstein basis functions BinomialCoefficient = [(1, 0)] for i in range(1, n + 1): last = BinomialCoefficient[-1] this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,) BinomialCoefficient.append(this) BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient) del last, this BernsteinPolynomial = tuple( tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs)) for n, coeffs in enumerate(BinomialCoefficient) ) BezierCurve = tuple( tuple( sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins)) for j in range(2) ) for n, bernsteins in enumerate(BernsteinPolynomial) ) BezierCurveC = tuple( sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins)) for n, bernsteins in enumerate(BernsteinPolynomial) ) def green(f, curveXY): f = -sp.integrate(sp.sympify(f), y) f = f.subs({x: curveXY[0], y: curveXY[1]}) f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1)) return f class 
_BezierFuncsLazy(dict): def __init__(self, symfunc): self._symfunc = symfunc self._bezfuncs = {} def __missing__(self, i): args = ["p%d" % d for d in range(i + 1)] f = green(self._symfunc, BezierCurve[i]) f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize return sp.lambdify(args, f) class GreenPen(BasePen): _BezierFuncs = {} @classmethod def _getGreenBezierFuncs(celf, func): funcstr = str(func) if not funcstr in celf._BezierFuncs: celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func) return celf._BezierFuncs[funcstr] def __init__(self, func, glyphset=None): BasePen.__init__(self, glyphset) self._funcs = self._getGreenBezierFuncs(func) self.value = 0 def _moveTo(self, p0): self.__startPoint = p0 def _closePath(self): p0 = self._getCurrentPoint() if p0 != self.__startPoint: self._lineTo(self.__startPoint) def _endPath(self): p0 = self._getCurrentPoint() if p0 != self.__startPoint: # Green theorem is not defined on open contours. raise NotImplementedError def _lineTo(self, p1): p0 = self._getCurrentPoint() self.value += self._funcs[1](p0, p1) def _qCurveToOne(self, p1, p2): p0 = self._getCurrentPoint() self.value += self._funcs[2](p0, p1, p2) def _curveToOne(self, p1, p2, p3): p0 = self._getCurrentPoint() self.value += self._funcs[3](p0, p1, p2, p3) # Sample pens. # Do not use this in real code. # Use fontTools.pens.momentsPen.MomentsPen instead. AreaPen = partial(GreenPen, func=1) MomentXPen = partial(GreenPen, func=x) MomentYPen = partial(GreenPen, func=y) MomentXXPen = partial(GreenPen, func=x * x) MomentYYPen = partial(GreenPen, func=y * y) MomentXYPen = partial(GreenPen, func=x * y) def printGreenPen(penName, funcs, file=sys.stdout, docstring=None): if docstring is not None: print('"""%s"""' % docstring) print( """from fontTools.pens.basePen import BasePen, OpenContourError try: import cython COMPILED = cython.compiled except (AttributeError, ImportError): # if cython not installed, use mock module with no-op decorators and types from fontTools.misc import cython COMPILED = False __all__ = ["%s"] class %s(BasePen): def __init__(self, glyphset=None): BasePen.__init__(self, glyphset) """ % (penName, penName), file=file, ) for name, f in funcs: print(" self.%s = 0" % name, file=file) print( """ def _moveTo(self, p0): self.__startPoint = p0 def _closePath(self): p0 = self._getCurrentPoint() if p0 != self.__startPoint: self._lineTo(self.__startPoint) def _endPath(self): p0 = self._getCurrentPoint() if p0 != self.__startPoint: # Green theorem is not defined on open contours. raise OpenContourError( "Green theorem is not defined on open contours." 
) """, end="", file=file, ) for n in (1, 2, 3): subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)} greens = [green(f, BezierCurve[n]) for name, f in funcs] greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize greens = [f.subs(subs) for f in greens] # Convert to p to x/y defs, exprs = sp.cse( greens, optimizations="basic", symbols=(sp.Symbol("r%d" % i) for i in count()), ) print() for name, value in defs: print(" @cython.locals(%s=cython.double)" % name, file=file) if n == 1: print( """\ @cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x1=cython.double, y1=cython.double) def _lineTo(self, p1): x0,y0 = self._getCurrentPoint() x1,y1 = p1 """, file=file, ) elif n == 2: print( """\ @cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x1=cython.double, y1=cython.double) @cython.locals(x2=cython.double, y2=cython.double) def _qCurveToOne(self, p1, p2): x0,y0 = self._getCurrentPoint() x1,y1 = p1 x2,y2 = p2 """, file=file, ) elif n == 3: print( """\ @cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x1=cython.double, y1=cython.double) @cython.locals(x2=cython.double, y2=cython.double) @cython.locals(x3=cython.double, y3=cython.double) def _curveToOne(self, p1, p2, p3): x0,y0 = self._getCurrentPoint() x1,y1 = p1 x2,y2 = p2 x3,y3 = p3 """, file=file, ) for name, value in defs: print(" %s = %s" % (name, value), file=file) print(file=file) for name, value in zip([f[0] for f in funcs], exprs): print(" self.%s += %s" % (name, value), file=file) print( """ if __name__ == '__main__': from fontTools.misc.symfont import x, y, printGreenPen printGreenPen('%s', [""" % penName, file=file, ) for name, f in funcs: print(" ('%s', %s)," % (name, str(f)), file=file) print(" ])", file=file) if __name__ == "__main__": pen = AreaPen() pen.moveTo((100, 100)) pen.lineTo((100, 200)) pen.lineTo((200, 200)) pen.curveTo((200, 250), (300, 300), (250, 350)) pen.lineTo((200, 100)) pen.closePath() print(pen.value) PKaZZZ0���fontTools/misc/testTools.py"""Helpers for writing unit tests.""" from collections.abc import Iterable from io import BytesIO import os import re import shutil import sys import tempfile from unittest import TestCase as _TestCase from fontTools.config import Config from fontTools.misc.textTools import tobytes from fontTools.misc.xmlWriter import XMLWriter def parseXML(xmlSnippet): """Parses a snippet of XML. Input can be either a single string (unicode or UTF-8 bytes), or a a sequence of strings. The result is in the same format that would be returned by XMLReader, but the parser imposes no constraints on the root element so it can be called on small snippets of TTX files. """ # To support snippets with multiple elements, we add a fake root. 
reader = TestXMLReader_() xml = b"<root>" if isinstance(xmlSnippet, bytes): xml += xmlSnippet elif isinstance(xmlSnippet, str): xml += tobytes(xmlSnippet, "utf-8") elif isinstance(xmlSnippet, Iterable): xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet) else: raise TypeError( "expected string or sequence of strings; found %r" % type(xmlSnippet).__name__ ) xml += b"</root>" reader.parser.Parse(xml, 0) return reader.root[2] def parseXmlInto(font, parseInto, xmlSnippet): parsed_xml = [e for e in parseXML(xmlSnippet.strip()) if not isinstance(e, str)] for name, attrs, content in parsed_xml: parseInto.fromXML(name, attrs, content, font) parseInto.populateDefaults() return parseInto class FakeFont: def __init__(self, glyphs): self.glyphOrder_ = glyphs self.reverseGlyphOrderDict_ = {g: i for i, g in enumerate(glyphs)} self.lazy = False self.tables = {} self.cfg = Config() def __getitem__(self, tag): return self.tables[tag] def __setitem__(self, tag, table): self.tables[tag] = table def get(self, tag, default=None): return self.tables.get(tag, default) def getGlyphID(self, name): return self.reverseGlyphOrderDict_[name] def getGlyphIDMany(self, lst): return [self.getGlyphID(gid) for gid in lst] def getGlyphName(self, glyphID): if glyphID < len(self.glyphOrder_): return self.glyphOrder_[glyphID] else: return "glyph%.5d" % glyphID def getGlyphNameMany(self, lst): return [self.getGlyphName(gid) for gid in lst] def getGlyphOrder(self): return self.glyphOrder_ def getReverseGlyphMap(self): return self.reverseGlyphOrderDict_ def getGlyphNames(self): return sorted(self.getGlyphOrder()) class TestXMLReader_(object): def __init__(self): from xml.parsers.expat import ParserCreate self.parser = ParserCreate() self.parser.StartElementHandler = self.startElement_ self.parser.EndElementHandler = self.endElement_ self.parser.CharacterDataHandler = self.addCharacterData_ self.root = None self.stack = [] def startElement_(self, name, attrs): element = (name, attrs, []) if self.stack: self.stack[-1][2].append(element) else: self.root = element self.stack.append(element) def endElement_(self, name): self.stack.pop() def addCharacterData_(self, data): self.stack[-1][2].append(data) def makeXMLWriter(newlinestr="\n"): # don't write OS-specific new lines writer = XMLWriter(BytesIO(), newlinestr=newlinestr) # erase XML declaration writer.file.seek(0) writer.file.truncate() return writer def getXML(func, ttFont=None): """Call the passed toXML function and return the written content as a list of lines (unicode strings). Result is stripped of XML declaration and OS-specific newline characters. """ writer = makeXMLWriter() func(writer, ttFont) xml = writer.file.getvalue().decode("utf-8") # toXML methods must always end with a writer.newline() assert xml.endswith("\n") return xml.splitlines() def stripVariableItemsFromTTX( string: str, ttLibVersion: bool = True, checkSumAdjustment: bool = True, modified: bool = True, created: bool = True, sfntVersion: bool = False, # opt-in only ) -> str: """Strip stuff like ttLibVersion, checksums, timestamps, etc. from TTX dumps.""" # ttlib changes with the fontTools version if ttLibVersion: string = re.sub(' ttLibVersion="[^"]+"', "", string) # sometimes (e.g. some subsetter tests) we don't care whether it's OTF or TTF if sfntVersion: string = re.sub(' sfntVersion="[^"]+"', "", string) # head table checksum and creation and mod date changes with each save. 
if checkSumAdjustment: string = re.sub('<checkSumAdjustment value="[^"]+"/>', "", string) if modified: string = re.sub('<modified value="[^"]+"/>', "", string) if created: string = re.sub('<created value="[^"]+"/>', "", string) return string class MockFont(object): """A font-like object that automatically adds any looked up glyphname to its glyphOrder.""" def __init__(self): self._glyphOrder = [".notdef"] class AllocatingDict(dict): def __missing__(reverseDict, key): self._glyphOrder.append(key) gid = len(reverseDict) reverseDict[key] = gid return gid self._reverseGlyphOrder = AllocatingDict({".notdef": 0}) self.lazy = False def getGlyphID(self, glyph): gid = self._reverseGlyphOrder[glyph] return gid def getReverseGlyphMap(self): return self._reverseGlyphOrder def getGlyphName(self, gid): return self._glyphOrder[gid] def getGlyphOrder(self): return self._glyphOrder class TestCase(_TestCase): def __init__(self, methodName): _TestCase.__init__(self, methodName) # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, # and fires deprecation warnings if a program uses the old name. if not hasattr(self, "assertRaisesRegex"): self.assertRaisesRegex = self.assertRaisesRegexp class DataFilesHandler(TestCase): def setUp(self): self.tempdir = None self.num_tempfiles = 0 def tearDown(self): if self.tempdir: shutil.rmtree(self.tempdir) def getpath(self, testfile): folder = os.path.dirname(sys.modules[self.__module__].__file__) return os.path.join(folder, "data", testfile) def temp_dir(self): if not self.tempdir: self.tempdir = tempfile.mkdtemp() def temp_font(self, font_path, file_name): self.temp_dir() temppath = os.path.join(self.tempdir, file_name) shutil.copy2(font_path, temppath) return temppath PKaZZZҔ��1 1 fontTools/misc/textTools.py"""fontTools.misc.textTools.py -- miscellaneous routines.""" import ast import string # alias kept for backward compatibility safeEval = ast.literal_eval class Tag(str): @staticmethod def transcode(blob): if isinstance(blob, bytes): blob = blob.decode("latin-1") return blob def __new__(self, content): return str.__new__(self, self.transcode(content)) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): return str.__eq__(self, self.transcode(other)) def __hash__(self): return str.__hash__(self) def tobytes(self): return self.encode("latin-1") def readHex(content): """Convert a list of hex strings to binary data.""" return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str))) def deHexStr(hexdata): """Convert a hex string to binary data.""" hexdata = strjoin(hexdata.split()) if len(hexdata) % 2: hexdata = hexdata + "0" data = [] for i in range(0, len(hexdata), 2): data.append(bytechr(int(hexdata[i : i + 2], 16))) return bytesjoin(data) def hexStr(data): """Convert binary data to a hex string.""" h = string.hexdigits r = "" for c in data: i = byteord(c) r = r + h[(i >> 4) & 0xF] + h[i & 0xF] return r def num2binary(l, bits=32): items = [] binary = "" for i in range(bits): if l & 0x1: binary = "1" + binary else: binary = "0" + binary l = l >> 1 if not ((i + 1) % 8): items.append(binary) binary = "" if binary: items.append(binary) items.reverse() assert l in (0, -1), "number doesn't fit in number of bits" return " ".join(items) def binary2num(bin): bin = strjoin(bin.split()) l = 0 for digit in bin: l = l << 1 if digit != "0": l = l | 0x1 return l def caselessSort(alist): """Return a sorted copy of a list. If there are only strings in the list, it will not consider case. 
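For example:

>>> caselessSort(['B', 'a', 'A'])
['A', 'a', 'B']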
""" try: return sorted(alist, key=lambda a: (a.lower(), a)) except TypeError: return sorted(alist) def pad(data, size): r"""Pad byte string 'data' with null bytes until its length is a multiple of 'size'. >>> len(pad(b'abcd', 4)) 4 >>> len(pad(b'abcde', 2)) 6 >>> len(pad(b'abcde', 4)) 8 >>> pad(b'abcdef', 4) == b'abcdef\x00\x00' True """ data = tobytes(data) if size > 1: remainder = len(data) % size if remainder: data += b"\0" * (size - remainder) return data def tostr(s, encoding="ascii", errors="strict"): if not isinstance(s, str): return s.decode(encoding, errors) else: return s def tobytes(s, encoding="ascii", errors="strict"): if isinstance(s, str): return s.encode(encoding, errors) else: return bytes(s) def bytechr(n): return bytes([n]) def byteord(c): return c if isinstance(c, int) else ord(c) def strjoin(iterable, joiner=""): return tostr(joiner).join(iterable) def bytesjoin(iterable, joiner=b""): return tobytes(joiner).join(tobytes(item) for item in iterable) if __name__ == "__main__": import doctest, sys sys.exit(doctest.testmod().failed) PKaZZZN��?��fontTools/misc/timeTools.py"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps. """ import os import time from datetime import datetime, timezone import calendar epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] MONTHNAMES = [ None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ] def asctime(t=None): """ Convert a tuple or struct_time representing a time as returned by gmtime() or localtime() to a 24-character string of the following form: >>> asctime(time.gmtime(0)) 'Thu Jan 1 00:00:00 1970' If t is not provided, the current time as returned by localtime() is used. Locale information is not used by asctime(). This is meant to normalise the output of the built-in time.asctime() across different platforms and Python versions. In Python 3.x, the day of the month is right-justified, whereas on Windows Python 2.7 it is padded with zeros. See https://github.com/fonttools/fonttools/issues/455 """ if t is None: t = time.localtime() s = "%s %s %2s %s" % ( DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday, time.strftime("%H:%M:%S %Y", t), ) return s def timestampToString(value): return asctime(time.gmtime(max(0, value + epoch_diff))) def timestampFromString(value): wkday, mnth = value[:7].split() t = datetime.strptime(value[7:], " %d %H:%M:%S %Y") t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc) wkday_idx = DAYNAMES.index(wkday) assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday' return int(t.timestamp()) - epoch_diff def timestampNow(): # https://reproducible-builds.org/specs/source-date-epoch/ source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH") if source_date_epoch is not None: return int(source_date_epoch) - epoch_diff return int(time.time() - epoch_diff) def timestampSinceEpoch(value): return int(value - epoch_diff) if __name__ == "__main__": import sys import doctest sys.exit(doctest.testmod().failed) PKaZZZ�xԅ�8�8fontTools/misc/transform.py"""Affine 2D transformation matrix class. The Transform class implements various transformation matrix operations, both on the matrix itself, as well as on 2D coordinates. Transform instances are effectively immutable: all methods that operate on the transformation itself always return a new instance. This has as the interesting side effect that Transform instances are hashable, ie. they can be used as dictionary keys. 
This module exports the following symbols: Transform this is the main class Identity Transform instance set to the identity transformation Offset Convenience function that returns a translating transformation Scale Convenience function that returns a scaling transformation The DecomposedTransform class implements a transformation with separate translate, rotation, scale, skew, and transformation-center components. :Example: >>> t = Transform(2, 0, 0, 3, 0, 0) >>> t.transformPoint((100, 100)) (200, 300) >>> t = Scale(2, 3) >>> t.transformPoint((100, 100)) (200, 300) >>> t.transformPoint((0, 0)) (0, 0) >>> t = Offset(2, 3) >>> t.transformPoint((100, 100)) (102, 103) >>> t.transformPoint((0, 0)) (2, 3) >>> t2 = t.scale(0.5) >>> t2.transformPoint((100, 100)) (52.0, 53.0) >>> import math >>> t3 = t2.rotate(math.pi / 2) >>> t3.transformPoint((0, 0)) (2.0, 3.0) >>> t3.transformPoint((100, 100)) (-48.0, 53.0) >>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2) >>> t.transformPoints([(0, 0), (1, 1), (100, 100)]) [(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)] >>> """ import math from typing import NamedTuple from dataclasses import dataclass __all__ = ["Transform", "Identity", "Offset", "Scale", "DecomposedTransform"] _EPSILON = 1e-15 _ONE_EPSILON = 1 - _EPSILON _MINUS_ONE_EPSILON = -1 + _EPSILON def _normSinCos(v): if abs(v) < _EPSILON: v = 0 elif v > _ONE_EPSILON: v = 1 elif v < _MINUS_ONE_EPSILON: v = -1 return v class Transform(NamedTuple): """2x2 transformation matrix plus offset, a.k.a. Affine transform. Transform instances are immutable: all transforming methods, eg. rotate(), return a new Transform instance. :Example: >>> t = Transform() >>> t <Transform [1 0 0 1 0 0]> >>> t.scale(2) <Transform [2 0 0 2 0 0]> >>> t.scale(2.5, 5.5) <Transform [2.5 0 0 5.5 0 0]> >>> >>> t.scale(2, 3).transformPoint((100, 100)) (200, 300) Transform's constructor takes six arguments, all of which are optional, and can be used as keyword arguments:: >>> Transform(12) <Transform [12 0 0 1 0 0]> >>> Transform(dx=12) <Transform [1 0 0 1 12 0]> >>> Transform(yx=12) <Transform [1 0 12 1 0 0]> Transform instances also behave like sequences of length 6:: >>> len(Identity) 6 >>> list(Identity) [1, 0, 0, 1, 0, 0] >>> tuple(Identity) (1, 0, 0, 1, 0, 0) Transform instances are comparable:: >>> t1 = Identity.scale(2, 3).translate(4, 6) >>> t2 = Identity.translate(8, 18).scale(2, 3) >>> t1 == t2 1 But beware of floating point rounding errors:: >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) >>> t1 <Transform [0.2 0 0 0.3 0.08 0.18]> >>> t2 <Transform [0.2 0 0 0.3 0.08 0.18]> >>> t1 == t2 0 Transform instances are hashable, meaning you can use them as keys in dictionaries:: >>> d = {Scale(12, 13): None} >>> d {<Transform [12 0 0 13 0 0]>: None} But again, beware of floating point rounding errors:: >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) >>> t1 <Transform [0.2 0 0 0.3 0.08 0.18]> >>> t2 <Transform [0.2 0 0 0.3 0.08 0.18]> >>> d = {t1: None} >>> d {<Transform [0.2 0 0 0.3 0.08 0.18]>: None} >>> d[t2] Traceback (most recent call last): File "<stdin>", line 1, in ? KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]> """ xx: float = 1 xy: float = 0 yx: float = 0 yy: float = 1 dx: float = 0 dy: float = 0 def transformPoint(self, p): """Transform a point. 
:Example: >>> t = Transform() >>> t = t.scale(2.5, 5.5) >>> t.transformPoint((100, 100)) (250.0, 550.0) """ (x, y) = p xx, xy, yx, yy, dx, dy = self return (xx * x + yx * y + dx, xy * x + yy * y + dy) def transformPoints(self, points): """Transform a list of points. :Example: >>> t = Scale(2, 3) >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)]) [(0, 0), (0, 300), (200, 300), (200, 0)] >>> """ xx, xy, yx, yy, dx, dy = self return [(xx * x + yx * y + dx, xy * x + yy * y + dy) for x, y in points] def transformVector(self, v): """Transform an (dx, dy) vector, treating translation as zero. :Example: >>> t = Transform(2, 0, 0, 2, 10, 20) >>> t.transformVector((3, -4)) (6, -8) >>> """ (dx, dy) = v xx, xy, yx, yy = self[:4] return (xx * dx + yx * dy, xy * dx + yy * dy) def transformVectors(self, vectors): """Transform a list of (dx, dy) vector, treating translation as zero. :Example: >>> t = Transform(2, 0, 0, 2, 10, 20) >>> t.transformVectors([(3, -4), (5, -6)]) [(6, -8), (10, -12)] >>> """ xx, xy, yx, yy = self[:4] return [(xx * dx + yx * dy, xy * dx + yy * dy) for dx, dy in vectors] def translate(self, x=0, y=0): """Return a new transformation, translated (offset) by x, y. :Example: >>> t = Transform() >>> t.translate(20, 30) <Transform [1 0 0 1 20 30]> >>> """ return self.transform((1, 0, 0, 1, x, y)) def scale(self, x=1, y=None): """Return a new transformation, scaled by x, y. The 'y' argument may be None, which implies to use the x value for y as well. :Example: >>> t = Transform() >>> t.scale(5) <Transform [5 0 0 5 0 0]> >>> t.scale(5, 6) <Transform [5 0 0 6 0 0]> >>> """ if y is None: y = x return self.transform((x, 0, 0, y, 0, 0)) def rotate(self, angle): """Return a new transformation, rotated by 'angle' (radians). :Example: >>> import math >>> t = Transform() >>> t.rotate(math.pi / 2) <Transform [0 1 -1 0 0 0]> >>> """ import math c = _normSinCos(math.cos(angle)) s = _normSinCos(math.sin(angle)) return self.transform((c, s, -s, c, 0, 0)) def skew(self, x=0, y=0): """Return a new transformation, skewed by x and y. :Example: >>> import math >>> t = Transform() >>> t.skew(math.pi / 4) <Transform [1 0 1 1 0 0]> >>> """ import math return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0)) def transform(self, other): """Return a new transformation, transformed by another transformation. :Example: >>> t = Transform(2, 0, 0, 3, 1, 6) >>> t.transform((4, 3, 2, 1, 5, 6)) <Transform [8 9 4 3 11 24]> >>> """ xx1, xy1, yx1, yy1, dx1, dy1 = other xx2, xy2, yx2, yy2, dx2, dy2 = self return self.__class__( xx1 * xx2 + xy1 * yx2, xx1 * xy2 + xy1 * yy2, yx1 * xx2 + yy1 * yx2, yx1 * xy2 + yy1 * yy2, xx2 * dx1 + yx2 * dy1 + dx2, xy2 * dx1 + yy2 * dy1 + dy2, ) def reverseTransform(self, other): """Return a new transformation, which is the other transformation transformed by self. self.reverseTransform(other) is equivalent to other.transform(self). :Example: >>> t = Transform(2, 0, 0, 3, 1, 6) >>> t.reverseTransform((4, 3, 2, 1, 5, 6)) <Transform [8 6 6 3 21 15]> >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6)) <Transform [8 6 6 3 21 15]> >>> """ xx1, xy1, yx1, yy1, dx1, dy1 = self xx2, xy2, yx2, yy2, dx2, dy2 = other return self.__class__( xx1 * xx2 + xy1 * yx2, xx1 * xy2 + xy1 * yy2, yx1 * xx2 + yy1 * yx2, yx1 * xy2 + yy1 * yy2, xx2 * dx1 + yx2 * dy1 + dx2, xy2 * dx1 + yy2 * dy1 + dy2, ) def inverse(self): """Return the inverse transformation. 
Identity = Transform()


def Offset(x=0, y=0):
    """Return the identity transformation offset by x, y.

    :Example:

            >>> Offset(2, 3)
            <Transform [1 0 0 1 2 3]>
            >>>
    """
    return Transform(1, 0, 0, 1, x, y)


def Scale(x, y=None):
    """Return the identity transformation scaled by x, y. The 'y' argument
    may be None, in which case the x value is used for y as well.

    :Example:

            >>> Scale(2, 3)
            <Transform [2 0 0 3 0 0]>
            >>>
    """
    if y is None:
        y = x
    return Transform(x, 0, 0, y, 0, 0)


@dataclass
class DecomposedTransform:
    """The DecomposedTransform class implements a transformation with separate
    translate, rotation, scale, skew, and transformation-center components.
    """

    translateX: float = 0
    translateY: float = 0
    rotation: float = 0  # in degrees, counter-clockwise
    scaleX: float = 1
    scaleY: float = 1
    skewX: float = 0  # in degrees, clockwise
    skewY: float = 0  # in degrees, counter-clockwise
    tCenterX: float = 0
    tCenterY: float = 0

    @classmethod
    def fromTransform(cls, transform):
        # Adapted from an answer on
        # https://math.stackexchange.com/questions/13150/extracting-rotation-scale-values-from-2d-transformation-matrix
        a, b, c, d, x, y = transform

        sx = math.copysign(1, a)
        if sx < 0:
            a *= sx
            b *= sx

        delta = a * d - b * c

        rotation = 0
        scaleX = scaleY = 0
        skewX = skewY = 0

        # Apply the QR-like decomposition.
        if a != 0 or b != 0:
            r = math.sqrt(a * a + b * b)
            rotation = math.acos(a / r) if b >= 0 else -math.acos(a / r)
            scaleX, scaleY = (r, delta / r)
            skewX, skewY = (math.atan((a * c + b * d) / (r * r)), 0)
        elif c != 0 or d != 0:
            s = math.sqrt(c * c + d * d)
            rotation = math.pi / 2 - (
                math.acos(-c / s) if d >= 0 else -math.acos(c / s)
            )
            scaleX, scaleY = (delta / s, s)
            skewX, skewY = (0, math.atan((a * c + b * d) / (s * s)))
        else:  # a = b = c = d = 0
            pass

        return DecomposedTransform(
            x,
            y,
            math.degrees(rotation),
            scaleX * sx,
            scaleY,
            math.degrees(skewX) * sx,
            math.degrees(skewY),
            0,
            0,
        )

    def toTransform(self):
        """Return the Transform() equivalent of this transformation.
:Example: >>> DecomposedTransform(scaleX=2, scaleY=2).toTransform() <Transform [2 0 0 2 0 0]> >>> """ t = Transform() t = t.translate( self.translateX + self.tCenterX, self.translateY + self.tCenterY ) t = t.rotate(math.radians(self.rotation)) t = t.scale(self.scaleX, self.scaleY) t = t.skew(math.radians(self.skewX), math.radians(self.skewY)) t = t.translate(-self.tCenterX, -self.tCenterY) return t if __name__ == "__main__": import sys import doctest sys.exit(doctest.testmod().failed) PKaZZZ�"^��fontTools/misc/treeTools.py"""Generic tools for working with trees.""" from math import ceil, log def build_n_ary_tree(leaves, n): """Build N-ary tree from sequence of leaf nodes. Return a list of lists where each non-leaf node is a list containing max n nodes. """ if not leaves: return [] assert n > 1 depth = ceil(log(len(leaves), n)) if depth <= 1: return list(leaves) # Fully populate complete subtrees of root until we have enough leaves left root = [] unassigned = None full_step = n ** (depth - 1) for i in range(0, len(leaves), full_step): subtree = leaves[i : i + full_step] if len(subtree) < full_step: unassigned = subtree break while len(subtree) > n: subtree = [subtree[k : k + n] for k in range(0, len(subtree), n)] root.append(subtree) if unassigned: # Recurse to fill the last subtree, which is the only partially populated one subtree = build_n_ary_tree(unassigned, n) if len(subtree) <= n - len(root): # replace last subtree with its children if they can still fit root.extend(subtree) else: root.append(subtree) assert len(root) <= n return root PKaZZZPK����fontTools/misc/vector.pyfrom numbers import Number import math import operator import warnings __all__ = ["Vector"] class Vector(tuple): """A math-like vector. Represents an n-dimensional numeric vector. ``Vector`` objects support vector addition and subtraction, scalar multiplication and division, negation, rounding, and comparison tests. 
""" __slots__ = () def __new__(cls, values, keep=False): if keep is not False: warnings.warn( "the 'keep' argument has been deprecated", DeprecationWarning, ) if type(values) == Vector: # No need to create a new object return values return super().__new__(cls, values) def __repr__(self): return f"{self.__class__.__name__}({super().__repr__()})" def _vectorOp(self, other, op): if isinstance(other, Vector): assert len(self) == len(other) return self.__class__(op(a, b) for a, b in zip(self, other)) if isinstance(other, Number): return self.__class__(op(v, other) for v in self) raise NotImplementedError() def _scalarOp(self, other, op): if isinstance(other, Number): return self.__class__(op(v, other) for v in self) raise NotImplementedError() def _unaryOp(self, op): return self.__class__(op(v) for v in self) def __add__(self, other): return self._vectorOp(other, operator.add) __radd__ = __add__ def __sub__(self, other): return self._vectorOp(other, operator.sub) def __rsub__(self, other): return self._vectorOp(other, _operator_rsub) def __mul__(self, other): return self._scalarOp(other, operator.mul) __rmul__ = __mul__ def __truediv__(self, other): return self._scalarOp(other, operator.truediv) def __rtruediv__(self, other): return self._scalarOp(other, _operator_rtruediv) def __pos__(self): return self._unaryOp(operator.pos) def __neg__(self): return self._unaryOp(operator.neg) def __round__(self, *, round=round): return self._unaryOp(round) def __eq__(self, other): if isinstance(other, list): # bw compat Vector([1, 2, 3]) == [1, 2, 3] other = tuple(other) return super().__eq__(other) def __ne__(self, other): return not self.__eq__(other) def __bool__(self): return any(self) __nonzero__ = __bool__ def __abs__(self): return math.sqrt(sum(x * x for x in self)) def length(self): """Return the length of the vector. Equivalent to abs(vector).""" return abs(self) def normalized(self): """Return the normalized vector of the vector.""" return self / abs(self) def dot(self, other): """Performs vector dot product, returning the sum of ``a[0] * b[0], a[1] * b[1], ...``""" assert len(self) == len(other) return sum(a * b for a, b in zip(self, other)) # Deprecated methods/properties def toInt(self): warnings.warn( "the 'toInt' method has been deprecated, use round(vector) instead", DeprecationWarning, ) return self.__round__() @property def values(self): warnings.warn( "the 'values' attribute has been deprecated, use " "the vector object itself instead", DeprecationWarning, ) return list(self) @values.setter def values(self, values): raise AttributeError( "can't set attribute, the 'values' attribute has been deprecated", ) def isclose(self, other: "Vector", **kwargs) -> bool: """Return True if the vector is close to another Vector.""" assert len(self) == len(other) return all(math.isclose(a, b, **kwargs) for a, b in zip(self, other)) def _operator_rsub(a, b): return operator.sub(b, a) def _operator_rtruediv(a, b): return operator.truediv(b, a) PKaZZZs�x ��fontTools/misc/visitor.py"""Generic visitor pattern implementation for Python objects.""" import enum class Visitor(object): defaultStop = False @classmethod def _register(celf, clazzes_attrs): assert celf != Visitor, "Subclass Visitor instead." 
if "_visitors" not in celf.__dict__: celf._visitors = {} def wrapper(method): assert method.__name__ == "visit" for clazzes, attrs in clazzes_attrs: if type(clazzes) != tuple: clazzes = (clazzes,) if type(attrs) == str: attrs = (attrs,) for clazz in clazzes: _visitors = celf._visitors.setdefault(clazz, {}) for attr in attrs: assert attr not in _visitors, ( "Oops, class '%s' has visitor function for '%s' defined already." % (clazz.__name__, attr) ) _visitors[attr] = method return None return wrapper @classmethod def register(celf, clazzes): if type(clazzes) != tuple: clazzes = (clazzes,) return celf._register([(clazzes, (None,))]) @classmethod def register_attr(celf, clazzes, attrs): clazzes_attrs = [] if type(clazzes) != tuple: clazzes = (clazzes,) if type(attrs) == str: attrs = (attrs,) for clazz in clazzes: clazzes_attrs.append((clazz, attrs)) return celf._register(clazzes_attrs) @classmethod def register_attrs(celf, clazzes_attrs): return celf._register(clazzes_attrs) @classmethod def _visitorsFor(celf, thing, _default={}): typ = type(thing) for celf in celf.mro(): _visitors = getattr(celf, "_visitors", None) if _visitors is None: break m = celf._visitors.get(typ, None) if m is not None: return m return _default def visitObject(self, obj, *args, **kwargs): """Called to visit an object. This function loops over all non-private attributes of the objects and calls any user-registered (via @register_attr() or @register_attrs()) visit() functions. If there is no user-registered visit function, of if there is and it returns True, or it returns None (or doesn't return anything) and visitor.defaultStop is False (default), then the visitor will proceed to call self.visitAttr()""" keys = sorted(vars(obj).keys()) _visitors = self._visitorsFor(obj) defaultVisitor = _visitors.get("*", None) for key in keys: if key[0] == "_": continue value = getattr(obj, key) visitorFunc = _visitors.get(key, defaultVisitor) if visitorFunc is not None: ret = visitorFunc(self, obj, key, value, *args, **kwargs) if ret == False or (ret is None and self.defaultStop): continue self.visitAttr(obj, key, value, *args, **kwargs) def visitAttr(self, obj, attr, value, *args, **kwargs): """Called to visit an attribute of an object.""" self.visit(value, *args, **kwargs) def visitList(self, obj, *args, **kwargs): """Called to visit any value that is a list.""" for value in obj: self.visit(value, *args, **kwargs) def visitDict(self, obj, *args, **kwargs): """Called to visit any value that is a dictionary.""" for value in obj.values(): self.visit(value, *args, **kwargs) def visitLeaf(self, obj, *args, **kwargs): """Called to visit any value that is not an object, list, or dictionary.""" pass def visit(self, obj, *args, **kwargs): """This is the main entry to the visitor. The visitor will visit object obj. The visitor will first determine if there is a registered (via @register()) visit function for the type of object. If there is, it will be called, and (visitor, obj, *args, **kwargs) will be passed to the user visit function. 
PKaZZZ+�����fontTools/misc/xmlReader.pyfrom fontTools import ttLib
from fontTools.misc.textTools import safeEval
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import sys
import os
import logging


log = logging.getLogger(__name__)


class TTXParseError(Exception):
    pass


BUFSIZE = 0x4000


class XMLReader(object):
    def __init__(
        self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False
    ):
        if fileOrPath == "-":
            fileOrPath = sys.stdin
        if not hasattr(fileOrPath, "read"):
            self.file = open(fileOrPath, "rb")
            self._closeStream = True
        else:
            # assume readable file object
            self.file = fileOrPath
            self._closeStream = False
        self.ttFont = ttFont
        self.progress = progress
        if quiet is not None:
            from fontTools.misc.loggingTools import deprecateArgument

            deprecateArgument("quiet", "configure logging instead")
            self.quiet = quiet
        self.root = None
        self.contentStack = []
        self.contentOnly = contentOnly
        self.stackSize = 0

    def read(self, rootless=False):
        if rootless:
            self.stackSize += 1
        if self.progress:
            self.file.seek(0, 2)
            fileSize = self.file.tell()
            self.progress.set(0, fileSize // 100 or 1)
            self.file.seek(0)
        self._parseFile(self.file)
        if self._closeStream:
            self.close()
        if rootless:
            self.stackSize -= 1

    def close(self):
        self.file.close()

    def _parseFile(self, file):
        from xml.parsers.expat import ParserCreate

        parser = ParserCreate()
        parser.StartElementHandler = self._startElementHandler
        parser.EndElementHandler = self._endElementHandler
        parser.CharacterDataHandler = self._characterDataHandler

        pos = 0
        while True:
            chunk = file.read(BUFSIZE)
            if not chunk:
                parser.Parse(chunk, 1)
                break
            pos = pos + len(chunk)
            if self.progress:
                self.progress.set(pos // 100)
            parser.Parse(chunk, 0)

    def _startElementHandler(self, name, attrs):
        if self.stackSize == 1 and self.contentOnly:
            # We already know the table we're parsing, skip
            # parsing the table tag and continue to
            # stack '2' which begins parsing content
            self.contentStack.append([])
            self.stackSize = 2
            return
        stackSize = self.stackSize
        self.stackSize = stackSize + 1
        subFile = attrs.get("src")
        if subFile is not None:
            if hasattr(self.file, "name"):
                # if file has a name, get its parent directory
                dirname = os.path.dirname(self.file.name)
            else:
                # else fall back to using the current working directory
                dirname = os.getcwd()
            subFile = os.path.join(dirname, subFile)
        if not stackSize:
            if name != "ttFont":
                raise TTXParseError("illegal root tag: %s" % name)
            if self.ttFont.reader is None and not self.ttFont.tables:
                sfntVersion = attrs.get("sfntVersion")
                if sfntVersion is not None:
                    if len(sfntVersion) != 4:
                        sfntVersion = safeEval('"' + sfntVersion + '"')
                    self.ttFont.sfntVersion = sfntVersion
            self.contentStack.append([])
        elif stackSize == 1:
            if subFile is not None:
                subReader = 
XMLReader(subFile, self.ttFont, self.progress) subReader.read() self.contentStack.append([]) return tag = ttLib.xmlToTag(name) msg = "Parsing '%s' table..." % tag if self.progress: self.progress.setLabel(msg) log.info(msg) if tag == "GlyphOrder": tableClass = ttLib.GlyphOrder elif "ERROR" in attrs or ("raw" in attrs and safeEval(attrs["raw"])): tableClass = DefaultTable else: tableClass = ttLib.getTableClass(tag) if tableClass is None: tableClass = DefaultTable if tag == "loca" and tag in self.ttFont: # Special-case the 'loca' table as we need the # original if the 'glyf' table isn't recompiled. self.currentTable = self.ttFont[tag] else: self.currentTable = tableClass(tag) self.ttFont[tag] = self.currentTable self.contentStack.append([]) elif stackSize == 2 and subFile is not None: subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True) subReader.read() self.contentStack.append([]) self.root = subReader.root elif stackSize == 2: self.contentStack.append([]) self.root = (name, attrs, self.contentStack[-1]) else: l = [] self.contentStack[-1].append((name, attrs, l)) self.contentStack.append(l) def _characterDataHandler(self, data): if self.stackSize > 1: # parser parses in chunks, so we may get multiple calls # for the same text node; thus we need to append the data # to the last item in the content stack: # https://github.com/fonttools/fonttools/issues/2614 if ( data != "\n" and self.contentStack[-1] and isinstance(self.contentStack[-1][-1], str) and self.contentStack[-1][-1] != "\n" ): self.contentStack[-1][-1] += data else: self.contentStack[-1].append(data) def _endElementHandler(self, name): self.stackSize = self.stackSize - 1 del self.contentStack[-1] if not self.contentOnly: if self.stackSize == 1: self.root = None elif self.stackSize == 2: name, attrs, content = self.root self.currentTable.fromXML(name, attrs, content, self.ttFont) self.root = None class ProgressPrinter(object): def __init__(self, title, maxval=100): print(title) def set(self, val, maxval=None): pass def increment(self, val=1): pass def setLabel(self, text): print(text) PKaZZZ����fontTools/misc/xmlWriter.py"""xmlWriter.py -- Simple XML authoring class""" from fontTools.misc.textTools import byteord, strjoin, tobytes, tostr import sys import os import string INDENT = " " class XMLWriter(object): def __init__( self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8", newlinestr="\n", ): if encoding.lower().replace("-", "").replace("_", "") != "utf8": raise Exception("Only UTF-8 encoding is supported.") if fileOrPath == "-": fileOrPath = sys.stdout if not hasattr(fileOrPath, "write"): self.filename = fileOrPath self.file = open(fileOrPath, "wb") self._closeStream = True else: self.filename = None # assume writable file object self.file = fileOrPath self._closeStream = False # Figure out if writer expects bytes or unicodes try: # The bytes check should be first. See: # https://github.com/fonttools/fonttools/pull/233 self.file.write(b"") self.totype = tobytes except TypeError: # This better not fail. 
self.file.write("") self.totype = tostr self.indentwhite = self.totype(indentwhite) if newlinestr is None: self.newlinestr = self.totype(os.linesep) else: self.newlinestr = self.totype(newlinestr) self.indentlevel = 0 self.stack = [] self.needindent = 1 self.idlefunc = idlefunc self.idlecounter = 0 self._writeraw('<?xml version="1.0" encoding="UTF-8"?>') self.newline() def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.close() def close(self): if self._closeStream: self.file.close() def write(self, string, indent=True): """Writes text.""" self._writeraw(escape(string), indent=indent) def writecdata(self, string): """Writes text in a CDATA section.""" self._writeraw("<![CDATA[" + string + "]]>") def write8bit(self, data, strip=False): """Writes a bytes() sequence into the XML, escaping non-ASCII bytes. When this is read in xmlReader, the original bytes can be recovered by encoding to 'latin-1'.""" self._writeraw(escape8bit(data), strip=strip) def write_noindent(self, string): """Writes text without indentation.""" self._writeraw(escape(string), indent=False) def _writeraw(self, data, indent=True, strip=False): """Writes bytes, possibly indented.""" if indent and self.needindent: self.file.write(self.indentlevel * self.indentwhite) self.needindent = 0 s = self.totype(data, encoding="utf_8") if strip: s = s.strip() self.file.write(s) def newline(self): self.file.write(self.newlinestr) self.needindent = 1 idlecounter = self.idlecounter if not idlecounter % 100 and self.idlefunc is not None: self.idlefunc() self.idlecounter = idlecounter + 1 def comment(self, data): data = escape(data) lines = data.split("\n") self._writeraw("<!-- " + lines[0]) for line in lines[1:]: self.newline() self._writeraw(" " + line) self._writeraw(" -->") def simpletag(self, _TAG_, *args, **kwargs): attrdata = self.stringifyattrs(*args, **kwargs) data = "<%s%s/>" % (_TAG_, attrdata) self._writeraw(data) def begintag(self, _TAG_, *args, **kwargs): attrdata = self.stringifyattrs(*args, **kwargs) data = "<%s%s>" % (_TAG_, attrdata) self._writeraw(data) self.stack.append(_TAG_) self.indent() def endtag(self, _TAG_): assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" del self.stack[-1] self.dedent() data = "</%s>" % _TAG_ self._writeraw(data) def dumphex(self, data): linelength = 16 hexlinelength = linelength * 2 chunksize = 8 for i in range(0, len(data), linelength): hexline = hexStr(data[i : i + linelength]) line = "" white = "" for j in range(0, hexlinelength, chunksize): line = line + white + hexline[j : j + chunksize] white = " " self._writeraw(line) self.newline() def indent(self): self.indentlevel = self.indentlevel + 1 def dedent(self): assert self.indentlevel > 0 self.indentlevel = self.indentlevel - 1 def stringifyattrs(self, *args, **kwargs): if kwargs: assert not args attributes = sorted(kwargs.items()) elif args: assert len(args) == 1 attributes = args[0] else: return "" data = "" for attr, value in attributes: if not isinstance(value, (bytes, str)): value = str(value) data = data + ' %s="%s"' % (attr, escapeattr(value)) return data def escape(data): data = tostr(data, "utf_8") data = data.replace("&", "&amp;") data = data.replace("<", "&lt;") data = data.replace(">", "&gt;") data = data.replace("\r", "&#13;") return data def escapeattr(data): data = escape(data) data = data.replace('"', "&quot;") return data def escape8bit(data): """Input is Unicode string.""" def escapechar(c): n = ord(c) if 32 <= n <= 127 and c not in "<&>": return c else: 
return "&#" + repr(n) + ";" return strjoin(map(escapechar, data.decode("latin-1"))) def hexStr(s): h = string.hexdigits r = "" for c in s: i = byteord(c) r = r + h[(i >> 4) & 0xF] + h[i & 0xF] return r PKaZZZ$��yRyR#fontTools/misc/plistlib/__init__.pyimport collections.abc import re from typing import ( Any, Callable, Dict, List, Mapping, MutableMapping, Optional, Sequence, Type, Union, IO, ) import warnings from io import BytesIO from datetime import datetime from base64 import b64encode, b64decode from numbers import Integral from types import SimpleNamespace from functools import singledispatch from fontTools.misc import etree from fontTools.misc.textTools import tostr # By default, we # - deserialize <data> elements as bytes and # - serialize bytes as <data> elements. # Before, on Python 2, we # - deserialized <data> elements as plistlib.Data objects, in order to # distinguish them from the built-in str type (which is bytes on python2) # - serialized bytes as <string> elements (they must have only contained # ASCII characters in this case) # You can pass use_builtin_types=[True|False] to the load/dump etc. functions # to enforce a specific treatment. # NOTE that unicode type always maps to <string> element, and plistlib.Data # always maps to <data> element, regardless of use_builtin_types. USE_BUILTIN_TYPES = True XML_DECLARATION = b"""<?xml version='1.0' encoding='UTF-8'?>""" PLIST_DOCTYPE = ( b'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" ' b'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">' ) # Date should conform to a subset of ISO 8601: # YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z' _date_parser = re.compile( r"(?P<year>\d\d\d\d)" r"(?:-(?P<month>\d\d)" r"(?:-(?P<day>\d\d)" r"(?:T(?P<hour>\d\d)" r"(?::(?P<minute>\d\d)" r"(?::(?P<second>\d\d))" r"?)?)?)?)?Z", re.ASCII, ) def _date_from_string(s: str) -> datetime: order = ("year", "month", "day", "hour", "minute", "second") m = _date_parser.match(s) if m is None: raise ValueError(f"Expected ISO 8601 date string, but got '{s:r}'.") gd = m.groupdict() lst = [] for key in order: val = gd[key] if val is None: break lst.append(int(val)) # NOTE: mypy doesn't know that lst is 6 elements long. return datetime(*lst) # type:ignore def _date_to_string(d: datetime) -> str: return "%04d-%02d-%02dT%02d:%02d:%02dZ" % ( d.year, d.month, d.day, d.hour, d.minute, d.second, ) class Data: """Represents binary data when ``use_builtin_types=False.`` This class wraps binary data loaded from a plist file when the ``use_builtin_types`` argument to the loading function (:py:func:`fromtree`, :py:func:`load`, :py:func:`loads`) is false. The actual binary data is retrieved using the ``data`` attribute. 
""" def __init__(self, data: bytes) -> None: if not isinstance(data, bytes): raise TypeError("Expected bytes, found %s" % type(data).__name__) self.data = data @classmethod def fromBase64(cls, data: Union[bytes, str]) -> "Data": return cls(b64decode(data)) def asBase64(self, maxlinelength: int = 76, indent_level: int = 1) -> bytes: return _encode_base64( self.data, maxlinelength=maxlinelength, indent_level=indent_level ) def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): return self.data == other.data elif isinstance(other, bytes): return self.data == other else: return NotImplemented def __repr__(self) -> str: return "%s(%s)" % (self.__class__.__name__, repr(self.data)) def _encode_base64( data: bytes, maxlinelength: Optional[int] = 76, indent_level: int = 1 ) -> bytes: data = b64encode(data) if data and maxlinelength: # split into multiple lines right-justified to 'maxlinelength' chars indent = b"\n" + b" " * indent_level max_length = max(16, maxlinelength - len(indent)) chunks = [] for i in range(0, len(data), max_length): chunks.append(indent) chunks.append(data[i : i + max_length]) chunks.append(indent) data = b"".join(chunks) return data # Mypy does not support recursive type aliases as of 0.782, Pylance does. # https://github.com/python/mypy/issues/731 # https://devblogs.microsoft.com/python/pylance-introduces-five-new-features-that-enable-type-magic-for-python-developers/#1-support-for-recursive-type-aliases PlistEncodable = Union[ bool, bytes, Data, datetime, float, Integral, Mapping[str, Any], Sequence[Any], str, ] class PlistTarget: """Event handler using the ElementTree Target API that can be passed to a XMLParser to produce property list objects from XML. It is based on the CPython plistlib module's _PlistParser class, but does not use the expat parser. >>> from fontTools.misc import etree >>> parser = etree.XMLParser(target=PlistTarget()) >>> result = etree.XML( ... "<dict>" ... " <key>something</key>" ... " <string>blah</string>" ... "</dict>", ... 
parser=parser) >>> result == {"something": "blah"} True Links: https://github.com/python/cpython/blob/main/Lib/plistlib.py http://lxml.de/parsing.html#the-target-parser-interface """ def __init__( self, use_builtin_types: Optional[bool] = None, dict_type: Type[MutableMapping[str, Any]] = dict, ) -> None: self.stack: List[PlistEncodable] = [] self.current_key: Optional[str] = None self.root: Optional[PlistEncodable] = None if use_builtin_types is None: self._use_builtin_types = USE_BUILTIN_TYPES else: if use_builtin_types is False: warnings.warn( "Setting use_builtin_types to False is deprecated and will be " "removed soon.", DeprecationWarning, ) self._use_builtin_types = use_builtin_types self._dict_type = dict_type def start(self, tag: str, attrib: Mapping[str, str]) -> None: self._data: List[str] = [] handler = _TARGET_START_HANDLERS.get(tag) if handler is not None: handler(self) def end(self, tag: str) -> None: handler = _TARGET_END_HANDLERS.get(tag) if handler is not None: handler(self) def data(self, data: str) -> None: self._data.append(data) def close(self) -> PlistEncodable: if self.root is None: raise ValueError("No root set.") return self.root # helpers def add_object(self, value: PlistEncodable) -> None: if self.current_key is not None: stack_top = self.stack[-1] if not isinstance(stack_top, collections.abc.MutableMapping): raise ValueError("unexpected element: %r" % stack_top) stack_top[self.current_key] = value self.current_key = None elif not self.stack: # this is the root object self.root = value else: stack_top = self.stack[-1] if not isinstance(stack_top, list): raise ValueError("unexpected element: %r" % stack_top) stack_top.append(value) def get_data(self) -> str: data = "".join(self._data) self._data = [] return data # event handlers def start_dict(self: PlistTarget) -> None: d = self._dict_type() self.add_object(d) self.stack.append(d) def end_dict(self: PlistTarget) -> None: if self.current_key: raise ValueError("missing value for key '%s'" % self.current_key) self.stack.pop() def end_key(self: PlistTarget) -> None: if self.current_key or not isinstance(self.stack[-1], collections.abc.Mapping): raise ValueError("unexpected key") self.current_key = self.get_data() def start_array(self: PlistTarget) -> None: a: List[PlistEncodable] = [] self.add_object(a) self.stack.append(a) def end_array(self: PlistTarget) -> None: self.stack.pop() def end_true(self: PlistTarget) -> None: self.add_object(True) def end_false(self: PlistTarget) -> None: self.add_object(False) def end_integer(self: PlistTarget) -> None: self.add_object(int(self.get_data())) def end_real(self: PlistTarget) -> None: self.add_object(float(self.get_data())) def end_string(self: PlistTarget) -> None: self.add_object(self.get_data()) def end_data(self: PlistTarget) -> None: if self._use_builtin_types: self.add_object(b64decode(self.get_data())) else: self.add_object(Data.fromBase64(self.get_data())) def end_date(self: PlistTarget) -> None: self.add_object(_date_from_string(self.get_data())) _TARGET_START_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = { "dict": start_dict, "array": start_array, } _TARGET_END_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = { "dict": end_dict, "array": end_array, "key": end_key, "true": end_true, "false": end_false, "integer": end_integer, "real": end_real, "string": end_string, "data": end_data, "date": end_date, } # functions to build element tree from plist data def _string_element(value: str, ctx: SimpleNamespace) -> etree.Element: el = etree.Element("string") 
el.text = value return el def _bool_element(value: bool, ctx: SimpleNamespace) -> etree.Element: if value: return etree.Element("true") return etree.Element("false") def _integer_element(value: int, ctx: SimpleNamespace) -> etree.Element: if -1 << 63 <= value < 1 << 64: el = etree.Element("integer") el.text = "%d" % value return el raise OverflowError(value) def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element: el = etree.Element("real") el.text = repr(value) return el def _dict_element( d: Mapping[str, PlistEncodable], ctx: SimpleNamespace ) -> etree.Element: el = etree.Element("dict") items = d.items() if ctx.sort_keys: items = sorted(items) # type: ignore ctx.indent_level += 1 for key, value in items: if not isinstance(key, str): if ctx.skipkeys: continue raise TypeError("keys must be strings") k = etree.SubElement(el, "key") k.text = tostr(key, "utf-8") el.append(_make_element(value, ctx)) ctx.indent_level -= 1 return el def _array_element( array: Sequence[PlistEncodable], ctx: SimpleNamespace ) -> etree.Element: el = etree.Element("array") if len(array) == 0: return el ctx.indent_level += 1 for value in array: el.append(_make_element(value, ctx)) ctx.indent_level -= 1 return el def _date_element(date: datetime, ctx: SimpleNamespace) -> etree.Element: el = etree.Element("date") el.text = _date_to_string(date) return el def _data_element(data: bytes, ctx: SimpleNamespace) -> etree.Element: el = etree.Element("data") # NOTE: mypy is confused about whether el.text should be str or bytes. el.text = _encode_base64( # type: ignore data, maxlinelength=(76 if ctx.pretty_print else None), indent_level=ctx.indent_level, ) return el def _string_or_data_element(raw_bytes: bytes, ctx: SimpleNamespace) -> etree.Element: if ctx.use_builtin_types: return _data_element(raw_bytes, ctx) else: try: string = raw_bytes.decode(encoding="ascii", errors="strict") except UnicodeDecodeError: raise ValueError( "invalid non-ASCII bytes; use unicode string instead: %r" % raw_bytes ) return _string_element(string, ctx) # The following is probably not entirely correct. The signature should take `Any` # and return `NoReturn`. At the time of this writing, neither mypy nor Pyright # can deal with singledispatch properly and will apply the signature of the base # function to all others. Being slightly dishonest makes it type-check and return # usable typing information for the optimistic case. @singledispatch def _make_element(value: PlistEncodable, ctx: SimpleNamespace) -> etree.Element: raise TypeError("unsupported type: %s" % type(value)) _make_element.register(str)(_string_element) _make_element.register(bool)(_bool_element) _make_element.register(Integral)(_integer_element) _make_element.register(float)(_real_element) _make_element.register(collections.abc.Mapping)(_dict_element) _make_element.register(list)(_array_element) _make_element.register(tuple)(_array_element) _make_element.register(datetime)(_date_element) _make_element.register(bytes)(_string_or_data_element) _make_element.register(bytearray)(_data_element) _make_element.register(Data)(lambda v, ctx: _data_element(v.data, ctx)) # Public functions to create element tree from plist-compatible python # data structures and viceversa, for use when (de)serializing GLIF xml. def totree( value: PlistEncodable, sort_keys: bool = True, skipkeys: bool = False, use_builtin_types: Optional[bool] = None, pretty_print: bool = True, indent_level: int = 1, ) -> etree.Element: """Convert a value derived from a plist into an XML tree. 
Args: value: Any kind of value to be serialized to XML. sort_keys: Whether keys of dictionaries should be sorted. skipkeys (bool): Whether to silently skip non-string dictionary keys. use_builtin_types (bool): If true, byte strings will be encoded in Base-64 and wrapped in a ``data`` tag; if false, they will be either stored as ASCII strings or an exception raised if they cannot be decoded as such. Defaults to ``True`` if not present. Deprecated. pretty_print (bool): Whether to indent the output. indent_level (int): Level of indentation when serializing. Returns: an ``etree`` ``Element`` object. Raises: ``TypeError`` if non-string dictionary keys are serialized and ``skipkeys`` is false. ``ValueError`` if non-ASCII binary data is present and `use_builtin_types` is false. """ if use_builtin_types is None: use_builtin_types = USE_BUILTIN_TYPES else: use_builtin_types = use_builtin_types context = SimpleNamespace( sort_keys=sort_keys, skipkeys=skipkeys, use_builtin_types=use_builtin_types, pretty_print=pretty_print, indent_level=indent_level, ) return _make_element(value, context) def fromtree( tree: etree.Element, use_builtin_types: Optional[bool] = None, dict_type: Type[MutableMapping[str, Any]] = dict, ) -> Any: """Convert an XML tree to a plist structure. Args: tree: An ``etree`` ``Element``. use_builtin_types: If True, binary data is deserialized to bytes strings. If False, it is wrapped in :py:class:`Data` objects. Defaults to True if not provided. Deprecated. dict_type: What type to use for dictionaries. Returns: An object (usually a dictionary). """ target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type) for action, element in etree.iterwalk(tree, events=("start", "end")): if action == "start": target.start(element.tag, element.attrib) elif action == "end": # if there are no children, parse the leaf's data if not len(element): # always pass str, not None target.data(element.text or "") target.end(element.tag) return target.close() # python3 plistlib API def load( fp: IO[bytes], use_builtin_types: Optional[bool] = None, dict_type: Type[MutableMapping[str, Any]] = dict, ) -> Any: """Load a plist file into an object. Args: fp: An opened file. use_builtin_types: If True, binary data is deserialized to bytes strings. If False, it is wrapped in :py:class:`Data` objects. Defaults to True if not provided. Deprecated. dict_type: What type to use for dictionaries. Returns: An object (usually a dictionary) representing the top level of the plist file. """ if not hasattr(fp, "read"): raise AttributeError("'%s' object has no attribute 'read'" % type(fp).__name__) target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type) parser = etree.XMLParser(target=target) result = etree.parse(fp, parser=parser) # lxml returns the target object directly, while ElementTree wraps # it as the root of an ElementTree object try: return result.getroot() except AttributeError: return result def loads( value: bytes, use_builtin_types: Optional[bool] = None, dict_type: Type[MutableMapping[str, Any]] = dict, ) -> Any: """Load a plist file from a string into an object. Args: value: A bytes string containing a plist. use_builtin_types: If True, binary data is deserialized to bytes strings. If False, it is wrapped in :py:class:`Data` objects. Defaults to True if not provided. Deprecated. dict_type: What type to use for dictionaries. Returns: An object (usually a dictionary) representing the top level of the plist file. 
""" fp = BytesIO(value) return load(fp, use_builtin_types=use_builtin_types, dict_type=dict_type) def dump( value: PlistEncodable, fp: IO[bytes], sort_keys: bool = True, skipkeys: bool = False, use_builtin_types: Optional[bool] = None, pretty_print: bool = True, ) -> None: """Write a Python object to a plist file. Args: value: An object to write. fp: A file opened for writing. sort_keys (bool): Whether keys of dictionaries should be sorted. skipkeys (bool): Whether to silently skip non-string dictionary keys. use_builtin_types (bool): If true, byte strings will be encoded in Base-64 and wrapped in a ``data`` tag; if false, they will be either stored as ASCII strings or an exception raised if they cannot be represented. Defaults pretty_print (bool): Whether to indent the output. indent_level (int): Level of indentation when serializing. Raises: ``TypeError`` if non-string dictionary keys are serialized and ``skipkeys`` is false. ``ValueError`` if non-representable binary data is present and `use_builtin_types` is false. """ if not hasattr(fp, "write"): raise AttributeError("'%s' object has no attribute 'write'" % type(fp).__name__) root = etree.Element("plist", version="1.0") el = totree( value, sort_keys=sort_keys, skipkeys=skipkeys, use_builtin_types=use_builtin_types, pretty_print=pretty_print, ) root.append(el) tree = etree.ElementTree(root) # we write the doctype ourselves instead of using the 'doctype' argument # of 'write' method, becuse lxml will force adding a '\n' even when # pretty_print is False. if pretty_print: header = b"\n".join((XML_DECLARATION, PLIST_DOCTYPE, b"")) else: header = XML_DECLARATION + PLIST_DOCTYPE fp.write(header) tree.write( # type: ignore fp, encoding="utf-8", pretty_print=pretty_print, xml_declaration=False, ) def dumps( value: PlistEncodable, sort_keys: bool = True, skipkeys: bool = False, use_builtin_types: Optional[bool] = None, pretty_print: bool = True, ) -> bytes: """Write a Python object to a string in plist format. Args: value: An object to write. sort_keys (bool): Whether keys of dictionaries should be sorted. skipkeys (bool): Whether to silently skip non-string dictionary keys. use_builtin_types (bool): If true, byte strings will be encoded in Base-64 and wrapped in a ``data`` tag; if false, they will be either stored as strings or an exception raised if they cannot be represented. Defaults pretty_print (bool): Whether to indent the output. indent_level (int): Level of indentation when serializing. Returns: string: A plist representation of the Python object. Raises: ``TypeError`` if non-string dictionary keys are serialized and ``skipkeys`` is false. ``ValueError`` if non-representable binary data is present and `use_builtin_types` is false. 
""" fp = BytesIO() dump( value, fp, sort_keys=sort_keys, skipkeys=skipkeys, use_builtin_types=use_builtin_types, pretty_print=pretty_print, ) return fp.getvalue() PKaZZZ fontTools/misc/plistlib/py.typedPKaZZZs!|���fontTools/mtiLib/__init__.py#!/usr/bin/python # FontDame-to-FontTools for OpenType Layout tables # # Source language spec is available at: # http://monotype.github.io/OpenType_Table_Source/otl_source.html # https://github.com/Monotype/OpenType_Table_Source/ from fontTools import ttLib from fontTools.ttLib.tables._c_m_a_p import cmap_classes from fontTools.ttLib.tables import otTables as ot from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict from fontTools.otlLib import builder as otl from contextlib import contextmanager from fontTools.ttLib import newTable from fontTools.feaLib.lookupDebugInfo import LOOKUP_DEBUG_ENV_VAR, LOOKUP_DEBUG_INFO_KEY from operator import setitem import os import logging class MtiLibError(Exception): pass class ReferenceNotFoundError(MtiLibError): pass class FeatureNotFoundError(ReferenceNotFoundError): pass class LookupNotFoundError(ReferenceNotFoundError): pass log = logging.getLogger("fontTools.mtiLib") def makeGlyph(s): if s[:2] in ["U ", "u "]: return ttLib.TTFont._makeGlyphName(int(s[2:], 16)) elif s[:2] == "# ": return "glyph%.5d" % int(s[2:]) assert s.find(" ") < 0, "Space found in glyph name: %s" % s assert s, "Glyph name is empty" return s def makeGlyphs(l): return [makeGlyph(g) for g in l] def mapLookup(sym, mapping): # Lookups are addressed by name. So resolved them using a map if available. # Fallback to parsing as lookup index if a map isn't provided. if mapping is not None: try: idx = mapping[sym] except KeyError: raise LookupNotFoundError(sym) else: idx = int(sym) return idx def mapFeature(sym, mapping): # Features are referenced by index according the spec. So, if symbol is an # integer, use it directly. Otherwise look up in the map if provided. 
try: idx = int(sym) except ValueError: try: idx = mapping[sym] except KeyError: raise FeatureNotFoundError(sym) return idx def setReference(mapper, mapping, sym, setter, collection, key): try: mapped = mapper(sym, mapping) except ReferenceNotFoundError as e: try: if mapping is not None: mapping.addDeferredMapping( lambda ref: setter(collection, key, ref), sym, e ) return except AttributeError: pass raise setter(collection, key, mapped) class DeferredMapping(dict): def __init__(self): self._deferredMappings = [] def addDeferredMapping(self, setter, sym, e): log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__) self._deferredMappings.append((setter, sym, e)) def applyDeferredMappings(self): for setter, sym, e in self._deferredMappings: log.debug( "Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__ ) try: mapped = self[sym] except KeyError: raise e setter(mapped) log.debug("Set to %s", mapped) self._deferredMappings = [] def parseScriptList(lines, featureMap=None): self = ot.ScriptList() records = [] with lines.between("script table"): for line in lines: while len(line) < 4: line.append("") scriptTag, langSysTag, defaultFeature, features = line log.debug("Adding script %s language-system %s", scriptTag, langSysTag) langSys = ot.LangSys() langSys.LookupOrder = None if defaultFeature: setReference( mapFeature, featureMap, defaultFeature, setattr, langSys, "ReqFeatureIndex", ) else: langSys.ReqFeatureIndex = 0xFFFF syms = stripSplitComma(features) langSys.FeatureIndex = theList = [3] * len(syms) for i, sym in enumerate(syms): setReference(mapFeature, featureMap, sym, setitem, theList, i) langSys.FeatureCount = len(langSys.FeatureIndex) script = [s for s in records if s.ScriptTag == scriptTag] if script: script = script[0].Script else: scriptRec = ot.ScriptRecord() scriptRec.ScriptTag = scriptTag + " " * (4 - len(scriptTag)) scriptRec.Script = ot.Script() records.append(scriptRec) script = scriptRec.Script script.DefaultLangSys = None script.LangSysRecord = [] script.LangSysCount = 0 if langSysTag == "default": script.DefaultLangSys = langSys else: langSysRec = ot.LangSysRecord() langSysRec.LangSysTag = langSysTag + " " * (4 - len(langSysTag)) langSysRec.LangSys = langSys script.LangSysRecord.append(langSysRec) script.LangSysCount = len(script.LangSysRecord) for script in records: script.Script.LangSysRecord = sorted( script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag ) self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag) self.ScriptCount = len(self.ScriptRecord) return self def parseFeatureList(lines, lookupMap=None, featureMap=None): self = ot.FeatureList() self.FeatureRecord = [] with lines.between("feature table"): for line in lines: name, featureTag, lookups = line if featureMap is not None: assert name not in featureMap, "Duplicate feature name: %s" % name featureMap[name] = len(self.FeatureRecord) # If feature name is integer, make sure it matches its index. 
try: assert int(name) == len(self.FeatureRecord), "%d %d" % ( name, len(self.FeatureRecord), ) except ValueError: pass featureRec = ot.FeatureRecord() featureRec.FeatureTag = featureTag featureRec.Feature = ot.Feature() self.FeatureRecord.append(featureRec) feature = featureRec.Feature feature.FeatureParams = None syms = stripSplitComma(lookups) feature.LookupListIndex = theList = [None] * len(syms) for i, sym in enumerate(syms): setReference(mapLookup, lookupMap, sym, setitem, theList, i) feature.LookupCount = len(feature.LookupListIndex) self.FeatureCount = len(self.FeatureRecord) return self def parseLookupFlags(lines): flags = 0 filterset = None allFlags = [ "righttoleft", "ignorebaseglyphs", "ignoreligatures", "ignoremarks", "markattachmenttype", "markfiltertype", ] while lines.peeks()[0].lower() in allFlags: line = next(lines) flag = { "righttoleft": 0x0001, "ignorebaseglyphs": 0x0002, "ignoreligatures": 0x0004, "ignoremarks": 0x0008, }.get(line[0].lower()) if flag: assert line[1].lower() in ["yes", "no"], line[1] if line[1].lower() == "yes": flags |= flag continue if line[0].lower() == "markattachmenttype": flags |= int(line[1]) << 8 continue if line[0].lower() == "markfiltertype": flags |= 0x10 filterset = int(line[1]) return flags, filterset def parseSingleSubst(lines, font, _lookupMap=None): mapping = {} for line in lines: assert len(line) == 2, line line = makeGlyphs(line) mapping[line[0]] = line[1] return otl.buildSingleSubstSubtable(mapping) def parseMultiple(lines, font, _lookupMap=None): mapping = {} for line in lines: line = makeGlyphs(line) mapping[line[0]] = line[1:] return otl.buildMultipleSubstSubtable(mapping) def parseAlternate(lines, font, _lookupMap=None): mapping = {} for line in lines: line = makeGlyphs(line) mapping[line[0]] = line[1:] return otl.buildAlternateSubstSubtable(mapping) def parseLigature(lines, font, _lookupMap=None): mapping = {} for line in lines: assert len(line) >= 2, line line = makeGlyphs(line) mapping[tuple(line[1:])] = line[0] return otl.buildLigatureSubstSubtable(mapping) def parseSinglePos(lines, font, _lookupMap=None): values = {} for line in lines: assert len(line) == 3, line w = line[0].title().replace(" ", "") assert w in valueRecordFormatDict g = makeGlyph(line[1]) v = int(line[2]) if g not in values: values[g] = ValueRecord() assert not hasattr(values[g], w), (g, w) setattr(values[g], w, v) return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap()) def parsePair(lines, font, _lookupMap=None): self = ot.PairPos() self.ValueFormat1 = self.ValueFormat2 = 0 typ = lines.peeks()[0].split()[0].lower() if typ in ("left", "right"): self.Format = 1 values = {} for line in lines: assert len(line) == 4, line side = line[0].split()[0].lower() assert side in ("left", "right"), side what = line[0][len(side) :].title().replace(" ", "") mask = valueRecordFormatDict[what][0] glyph1, glyph2 = makeGlyphs(line[1:3]) value = int(line[3]) if not glyph1 in values: values[glyph1] = {} if not glyph2 in values[glyph1]: values[glyph1][glyph2] = (ValueRecord(), ValueRecord()) rec2 = values[glyph1][glyph2] if side == "left": self.ValueFormat1 |= mask vr = rec2[0] else: self.ValueFormat2 |= mask vr = rec2[1] assert not hasattr(vr, what), (vr, what) setattr(vr, what, value) self.Coverage = makeCoverage(set(values.keys()), font) self.PairSet = [] for glyph1 in self.Coverage.glyphs: values1 = values[glyph1] pairset = ot.PairSet() records = pairset.PairValueRecord = [] for glyph2 in sorted(values1.keys(), key=font.getGlyphID): values2 = values1[glyph2] pair = 
ot.PairValueRecord() pair.SecondGlyph = glyph2 pair.Value1 = values2[0] pair.Value2 = values2[1] if self.ValueFormat2 else None records.append(pair) pairset.PairValueCount = len(pairset.PairValueRecord) self.PairSet.append(pairset) self.PairSetCount = len(self.PairSet) elif typ.endswith("class"): self.Format = 2 classDefs = [None, None] while lines.peeks()[0].endswith("class definition begin"): typ = lines.peek()[0][: -len("class definition begin")].lower() idx, klass = { "first": (0, ot.ClassDef1), "second": (1, ot.ClassDef2), }[typ] assert classDefs[idx] is None classDefs[idx] = parseClassDef(lines, font, klass=klass) self.ClassDef1, self.ClassDef2 = classDefs self.Class1Count, self.Class2Count = ( 1 + max(c.classDefs.values()) for c in classDefs ) self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)] for rec1 in self.Class1Record: rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)] for rec2 in rec1.Class2Record: rec2.Value1 = ValueRecord() rec2.Value2 = ValueRecord() for line in lines: assert len(line) == 4, line side = line[0].split()[0].lower() assert side in ("left", "right"), side what = line[0][len(side) :].title().replace(" ", "") mask = valueRecordFormatDict[what][0] class1, class2, value = (int(x) for x in line[1:4]) rec2 = self.Class1Record[class1].Class2Record[class2] if side == "left": self.ValueFormat1 |= mask vr = rec2.Value1 else: self.ValueFormat2 |= mask vr = rec2.Value2 assert not hasattr(vr, what), (vr, what) setattr(vr, what, value) for rec1 in self.Class1Record: for rec2 in rec1.Class2Record: rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1) rec2.Value2 = ( ValueRecord(self.ValueFormat2, rec2.Value2) if self.ValueFormat2 else None ) self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font) else: assert 0, typ return self def parseKernset(lines, font, _lookupMap=None): typ = lines.peeks()[0].split()[0].lower() if typ in ("left", "right"): with lines.until( ("firstclass definition begin", "secondclass definition begin") ): return parsePair(lines, font) return parsePair(lines, font) def makeAnchor(data, klass=ot.Anchor): assert len(data) <= 2 anchor = klass() anchor.Format = 1 anchor.XCoordinate, anchor.YCoordinate = intSplitComma(data[0]) if len(data) > 1 and data[1] != "": anchor.Format = 2 anchor.AnchorPoint = int(data[1]) return anchor def parseCursive(lines, font, _lookupMap=None): records = {} for line in lines: assert len(line) in [3, 4], line idx, klass = { "entry": (0, ot.EntryAnchor), "exit": (1, ot.ExitAnchor), }[line[0]] glyph = makeGlyph(line[1]) if glyph not in records: records[glyph] = [None, None] assert records[glyph][idx] is None, (glyph, idx) records[glyph][idx] = makeAnchor(line[2:], klass) return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap()) def makeMarkRecords(data, coverage, c): records = [] for glyph in coverage.glyphs: klass, anchor = data[glyph] record = c.MarkRecordClass() record.Class = klass setattr(record, c.MarkAnchor, anchor) records.append(record) return records def makeBaseRecords(data, coverage, c, classCount): records = [] idx = {} for glyph in coverage.glyphs: idx[glyph] = len(records) record = c.BaseRecordClass() anchors = [None] * classCount setattr(record, c.BaseAnchor, anchors) records.append(record) for (glyph, klass), anchor in data.items(): record = records[idx[glyph]] anchors = getattr(record, c.BaseAnchor) assert anchors[klass] is None, (glyph, klass) anchors[klass] = anchor return records def makeLigatureRecords(data, coverage, c, classCount): 
records = [None] * len(coverage.glyphs) idx = {g: i for i, g in enumerate(coverage.glyphs)} for (glyph, klass, compIdx, compCount), anchor in data.items(): record = records[idx[glyph]] if record is None: record = records[idx[glyph]] = ot.LigatureAttach() record.ComponentCount = compCount record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)] for compRec in record.ComponentRecord: compRec.LigatureAnchor = [None] * classCount assert record.ComponentCount == compCount, ( glyph, record.ComponentCount, compCount, ) anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor assert anchors[klass] is None, (glyph, compIdx, klass) anchors[klass] = anchor return records def parseMarkToSomething(lines, font, c): self = c.Type() self.Format = 1 markData = {} baseData = {} Data = { "mark": (markData, c.MarkAnchorClass), "base": (baseData, c.BaseAnchorClass), "ligature": (baseData, c.BaseAnchorClass), } maxKlass = 0 for line in lines: typ = line[0] assert typ in ("mark", "base", "ligature") glyph = makeGlyph(line[1]) data, anchorClass = Data[typ] extraItems = 2 if typ == "ligature" else 0 extras = tuple(int(i) for i in line[2 : 2 + extraItems]) klass = int(line[2 + extraItems]) anchor = makeAnchor(line[3 + extraItems :], anchorClass) if typ == "mark": key, value = glyph, (klass, anchor) else: key, value = ((glyph, klass) + extras), anchor assert key not in data, key data[key] = value maxKlass = max(maxKlass, klass) # Mark markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass) markArray = c.MarkArrayClass() markRecords = makeMarkRecords(markData, markCoverage, c) setattr(markArray, c.MarkRecord, markRecords) setattr(markArray, c.MarkCount, len(markRecords)) setattr(self, c.MarkCoverage, markCoverage) setattr(self, c.MarkArray, markArray) self.ClassCount = maxKlass + 1 # Base self.classCount = 0 if not baseData else 1 + max(k[1] for k, v in baseData.items()) baseCoverage = makeCoverage( set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass ) baseArray = c.BaseArrayClass() if c.Base == "Ligature": baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount) else: baseRecords = makeBaseRecords(baseData, baseCoverage, c, self.classCount) setattr(baseArray, c.BaseRecord, baseRecords) setattr(baseArray, c.BaseCount, len(baseRecords)) setattr(self, c.BaseCoverage, baseCoverage) setattr(self, c.BaseArray, baseArray) return self class MarkHelper(object): def __init__(self): for Which in ("Mark", "Base"): for What in ("Coverage", "Array", "Count", "Record", "Anchor"): key = Which + What if Which == "Mark" and What in ("Count", "Record", "Anchor"): value = key else: value = getattr(self, Which) + What if value == "LigatureRecord": value = "LigatureAttach" setattr(self, key, value) if What != "Count": klass = getattr(ot, value) setattr(self, key + "Class", klass) class MarkToBaseHelper(MarkHelper): Mark = "Mark" Base = "Base" Type = ot.MarkBasePos class MarkToMarkHelper(MarkHelper): Mark = "Mark1" Base = "Mark2" Type = ot.MarkMarkPos class MarkToLigatureHelper(MarkHelper): Mark = "Mark" Base = "Ligature" Type = ot.MarkLigPos def parseMarkToBase(lines, font, _lookupMap=None): return parseMarkToSomething(lines, font, MarkToBaseHelper()) def parseMarkToMark(lines, font, _lookupMap=None): return parseMarkToSomething(lines, font, MarkToMarkHelper()) def parseMarkToLigature(lines, font, _lookupMap=None): return parseMarkToSomething(lines, font, MarkToLigatureHelper()) def stripSplitComma(line): return [s.strip() for s in line.split(",")] if line else [] 
def intSplitComma(line): return [int(i) for i in line.split(",")] if line else [] # Copied from fontTools.subset class ContextHelper(object): def __init__(self, klassName, Format): if klassName.endswith("Subst"): Typ = "Sub" Type = "Subst" else: Typ = "Pos" Type = "Pos" if klassName.startswith("Chain"): Chain = "Chain" InputIdx = 1 DataLen = 3 else: Chain = "" InputIdx = 0 DataLen = 1 ChainTyp = Chain + Typ self.Typ = Typ self.Type = Type self.Chain = Chain self.ChainTyp = ChainTyp self.InputIdx = InputIdx self.DataLen = DataLen self.LookupRecord = Type + "LookupRecord" if Format == 1: Coverage = lambda r: r.Coverage ChainCoverage = lambda r: r.Coverage ContextData = lambda r: (None,) ChainContextData = lambda r: (None, None, None) SetContextData = None SetChainContextData = None RuleData = lambda r: (r.Input,) ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead) def SetRuleData(r, d): (r.Input,) = d (r.GlyphCount,) = (len(x) + 1 for x in d) def ChainSetRuleData(r, d): (r.Backtrack, r.Input, r.LookAhead) = d ( r.BacktrackGlyphCount, r.InputGlyphCount, r.LookAheadGlyphCount, ) = (len(d[0]), len(d[1]) + 1, len(d[2])) elif Format == 2: Coverage = lambda r: r.Coverage ChainCoverage = lambda r: r.Coverage ContextData = lambda r: (r.ClassDef,) ChainContextData = lambda r: ( r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef, ) def SetContextData(r, d): (r.ClassDef,) = d def SetChainContextData(r, d): (r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef) = d RuleData = lambda r: (r.Class,) ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead) def SetRuleData(r, d): (r.Class,) = d (r.GlyphCount,) = (len(x) + 1 for x in d) def ChainSetRuleData(r, d): (r.Backtrack, r.Input, r.LookAhead) = d ( r.BacktrackGlyphCount, r.InputGlyphCount, r.LookAheadGlyphCount, ) = (len(d[0]), len(d[1]) + 1, len(d[2])) elif Format == 3: Coverage = lambda r: r.Coverage[0] ChainCoverage = lambda r: r.InputCoverage[0] ContextData = None ChainContextData = None SetContextData = None SetChainContextData = None RuleData = lambda r: r.Coverage ChainRuleData = lambda r: ( r.BacktrackCoverage + r.InputCoverage + r.LookAheadCoverage ) def SetRuleData(r, d): (r.Coverage,) = d (r.GlyphCount,) = (len(x) for x in d) def ChainSetRuleData(r, d): (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d ( r.BacktrackGlyphCount, r.InputGlyphCount, r.LookAheadGlyphCount, ) = (len(x) for x in d) else: assert 0, "unknown format: %s" % Format if Chain: self.Coverage = ChainCoverage self.ContextData = ChainContextData self.SetContextData = SetChainContextData self.RuleData = ChainRuleData self.SetRuleData = ChainSetRuleData else: self.Coverage = Coverage self.ContextData = ContextData self.SetContextData = SetContextData self.RuleData = RuleData self.SetRuleData = SetRuleData if Format == 1: self.Rule = ChainTyp + "Rule" self.RuleCount = ChainTyp + "RuleCount" self.RuleSet = ChainTyp + "RuleSet" self.RuleSetCount = ChainTyp + "RuleSetCount" self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] elif Format == 2: self.Rule = ChainTyp + "ClassRule" self.RuleCount = ChainTyp + "ClassRuleCount" self.RuleSet = ChainTyp + "ClassSet" self.RuleSetCount = ChainTyp + "ClassSetCount" self.Intersect = lambda glyphs, c, r: ( c.intersect_class(glyphs, r) if c else (set(glyphs) if r == 0 else set()) ) self.ClassDef = "InputClassDef" if Chain else "ClassDef" self.ClassDefIndex = 1 if Chain else 0 self.Input = "Input" if Chain else "Class" def parseLookupRecords(items, klassName, lookupMap=None): klass = 
getattr(ot, klassName) lst = [] for item in items: rec = klass() item = stripSplitComma(item) assert len(item) == 2, item idx = int(item[0]) assert idx > 0, idx rec.SequenceIndex = idx - 1 setReference(mapLookup, lookupMap, item[1], setattr, rec, "LookupListIndex") lst.append(rec) return lst def makeClassDef(classDefs, font, klass=ot.Coverage): if not classDefs: return None self = klass() self.classDefs = dict(classDefs) return self def parseClassDef(lines, font, klass=ot.ClassDef): classDefs = {} with lines.between("class definition"): for line in lines: glyph = makeGlyph(line[0]) assert glyph not in classDefs, glyph classDefs[glyph] = int(line[1]) return makeClassDef(classDefs, font, klass) def makeCoverage(glyphs, font, klass=ot.Coverage): if not glyphs: return None if isinstance(glyphs, set): glyphs = sorted(glyphs) coverage = klass() coverage.glyphs = sorted(set(glyphs), key=font.getGlyphID) return coverage def parseCoverage(lines, font, klass=ot.Coverage): glyphs = [] with lines.between("coverage definition"): for line in lines: glyphs.append(makeGlyph(line[0])) return makeCoverage(glyphs, font, klass) def bucketizeRules(self, c, rules, bucketKeys): buckets = {} for seq, recs in rules: buckets.setdefault(seq[c.InputIdx][0], []).append( (tuple(s[1 if i == c.InputIdx else 0 :] for i, s in enumerate(seq)), recs) ) rulesets = [] for firstGlyph in bucketKeys: if firstGlyph not in buckets: rulesets.append(None) continue thisRules = [] for seq, recs in buckets[firstGlyph]: rule = getattr(ot, c.Rule)() c.SetRuleData(rule, seq) setattr(rule, c.Type + "Count", len(recs)) setattr(rule, c.LookupRecord, recs) thisRules.append(rule) ruleset = getattr(ot, c.RuleSet)() setattr(ruleset, c.Rule, thisRules) setattr(ruleset, c.RuleCount, len(thisRules)) rulesets.append(ruleset) setattr(self, c.RuleSet, rulesets) setattr(self, c.RuleSetCount, len(rulesets)) def parseContext(lines, font, Type, lookupMap=None): self = getattr(ot, Type)() typ = lines.peeks()[0].split()[0].lower() if typ == "glyph": self.Format = 1 log.debug("Parsing %s format %s", Type, self.Format) c = ContextHelper(Type, self.Format) rules = [] for line in lines: assert line[0].lower() == "glyph", line[0] while len(line) < 1 + c.DataLen: line.append("") seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1 : 1 + c.DataLen]) recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap) rules.append((seq, recs)) firstGlyphs = set(seq[c.InputIdx][0] for seq, recs in rules) self.Coverage = makeCoverage(firstGlyphs, font) bucketizeRules(self, c, rules, self.Coverage.glyphs) elif typ.endswith("class"): self.Format = 2 log.debug("Parsing %s format %s", Type, self.Format) c = ContextHelper(Type, self.Format) classDefs = [None] * c.DataLen while lines.peeks()[0].endswith("class definition begin"): typ = lines.peek()[0][: -len("class definition begin")].lower() idx, klass = { 1: { "": (0, ot.ClassDef), }, 3: { "backtrack": (0, ot.BacktrackClassDef), "": (1, ot.InputClassDef), "lookahead": (2, ot.LookAheadClassDef), }, }[c.DataLen][typ] assert classDefs[idx] is None, idx classDefs[idx] = parseClassDef(lines, font, klass=klass) c.SetContextData(self, classDefs) rules = [] for line in lines: assert line[0].lower().startswith("class"), line[0] while len(line) < 1 + c.DataLen: line.append("") seq = tuple(intSplitComma(i) for i in line[1 : 1 + c.DataLen]) recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap) rules.append((seq, recs)) firstClasses = set(seq[c.InputIdx][0] for seq, recs in rules) firstGlyphs = set( 
g for g, c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses ) self.Coverage = makeCoverage(firstGlyphs, font) bucketizeRules(self, c, rules, range(max(firstClasses) + 1)) elif typ.endswith("coverage"): self.Format = 3 log.debug("Parsing %s format %s", Type, self.Format) c = ContextHelper(Type, self.Format) coverages = tuple([] for i in range(c.DataLen)) while lines.peeks()[0].endswith("coverage definition begin"): typ = lines.peek()[0][: -len("coverage definition begin")].lower() idx, klass = { 1: { "": (0, ot.Coverage), }, 3: { "backtrack": (0, ot.BacktrackCoverage), "input": (1, ot.InputCoverage), "lookahead": (2, ot.LookAheadCoverage), }, }[c.DataLen][typ] coverages[idx].append(parseCoverage(lines, font, klass=klass)) c.SetRuleData(self, coverages) lines = list(lines) assert len(lines) == 1 line = lines[0] assert line[0].lower() == "coverage", line[0] recs = parseLookupRecords(line[1:], c.LookupRecord, lookupMap) setattr(self, c.Type + "Count", len(recs)) setattr(self, c.LookupRecord, recs) else: assert 0, typ return self def parseContextSubst(lines, font, lookupMap=None): return parseContext(lines, font, "ContextSubst", lookupMap=lookupMap) def parseContextPos(lines, font, lookupMap=None): return parseContext(lines, font, "ContextPos", lookupMap=lookupMap) def parseChainedSubst(lines, font, lookupMap=None): return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap) def parseChainedPos(lines, font, lookupMap=None): return parseContext(lines, font, "ChainContextPos", lookupMap=lookupMap) def parseReverseChainedSubst(lines, font, _lookupMap=None): self = ot.ReverseChainSingleSubst() self.Format = 1 coverages = ([], []) while lines.peeks()[0].endswith("coverage definition begin"): typ = lines.peek()[0][: -len("coverage definition begin")].lower() idx, klass = { "backtrack": (0, ot.BacktrackCoverage), "lookahead": (1, ot.LookAheadCoverage), }[typ] coverages[idx].append(parseCoverage(lines, font, klass=klass)) self.BacktrackCoverage = coverages[0] self.BacktrackGlyphCount = len(self.BacktrackCoverage) self.LookAheadCoverage = coverages[1] self.LookAheadGlyphCount = len(self.LookAheadCoverage) mapping = {} for line in lines: assert len(line) == 2, line line = makeGlyphs(line) mapping[line[0]] = line[1] self.Coverage = makeCoverage(set(mapping.keys()), font) self.Substitute = [mapping[k] for k in self.Coverage.glyphs] self.GlyphCount = len(self.Substitute) return self def parseLookup(lines, tableTag, font, lookupMap=None): line = lines.expect("lookup") _, name, typ = line log.debug("Parsing lookup type %s %s", typ, name) lookup = ot.Lookup() lookup.LookupFlag, filterset = parseLookupFlags(lines) if filterset is not None: lookup.MarkFilteringSet = filterset lookup.LookupType, parseLookupSubTable = { "GSUB": { "single": (1, parseSingleSubst), "multiple": (2, parseMultiple), "alternate": (3, parseAlternate), "ligature": (4, parseLigature), "context": (5, parseContextSubst), "chained": (6, parseChainedSubst), "reversechained": (8, parseReverseChainedSubst), }, "GPOS": { "single": (1, parseSinglePos), "pair": (2, parsePair), "kernset": (2, parseKernset), "cursive": (3, parseCursive), "mark to base": (4, parseMarkToBase), "mark to ligature": (5, parseMarkToLigature), "mark to mark": (6, parseMarkToMark), "context": (7, parseContextPos), "chained": (8, parseChainedPos), }, }[tableTag][typ] with lines.until("lookup end"): subtables = [] while lines.peek(): with lines.until(("% subtable", "subtable end")): while lines.peek(): subtable = parseLookupSubTable(lines, font, 
lookupMap) assert lookup.LookupType == subtable.LookupType subtables.append(subtable) if lines.peeks()[0] in ("% subtable", "subtable end"): next(lines) lines.expect("lookup end") lookup.SubTable = subtables lookup.SubTableCount = len(lookup.SubTable) if lookup.SubTableCount == 0: # Remove this return when following is fixed: # https://github.com/fonttools/fonttools/issues/789 return None return lookup def parseGSUBGPOS(lines, font, tableTag): container = ttLib.getTableClass(tableTag)() lookupMap = DeferredMapping() featureMap = DeferredMapping() assert tableTag in ("GSUB", "GPOS") log.debug("Parsing %s", tableTag) self = getattr(ot, tableTag)() self.Version = 0x00010000 fields = { "script table begin": ( "ScriptList", lambda lines: parseScriptList(lines, featureMap), ), "feature table begin": ( "FeatureList", lambda lines: parseFeatureList(lines, lookupMap, featureMap), ), "lookup": ("LookupList", None), } for attr, parser in fields.values(): setattr(self, attr, None) while lines.peek() is not None: typ = lines.peek()[0].lower() if typ not in fields: log.debug("Skipping %s", lines.peek()) next(lines) continue attr, parser = fields[typ] if typ == "lookup": if self.LookupList is None: self.LookupList = ot.LookupList() self.LookupList.Lookup = [] _, name, _ = lines.peek() lookup = parseLookup(lines, tableTag, font, lookupMap) if lookupMap is not None: assert name not in lookupMap, "Duplicate lookup name: %s" % name lookupMap[name] = len(self.LookupList.Lookup) else: assert int(name) == len(self.LookupList.Lookup), "%d %d" % ( name, len(self.Lookup), ) self.LookupList.Lookup.append(lookup) else: assert getattr(self, attr) is None, attr setattr(self, attr, parser(lines)) if self.LookupList: self.LookupList.LookupCount = len(self.LookupList.Lookup) if lookupMap is not None: lookupMap.applyDeferredMappings() if os.environ.get(LOOKUP_DEBUG_ENV_VAR): if "Debg" not in font: font["Debg"] = newTable("Debg") font["Debg"].data = {} debug = ( font["Debg"] .data.setdefault(LOOKUP_DEBUG_INFO_KEY, {}) .setdefault(tableTag, {}) ) for name, lookup in lookupMap.items(): debug[str(lookup)] = ["", name, ""] featureMap.applyDeferredMappings() container.table = self return container def parseGSUB(lines, font): return parseGSUBGPOS(lines, font, "GSUB") def parseGPOS(lines, font): return parseGSUBGPOS(lines, font, "GPOS") def parseAttachList(lines, font): points = {} with lines.between("attachment list"): for line in lines: glyph = makeGlyph(line[0]) assert glyph not in points, glyph points[glyph] = [int(i) for i in line[1:]] return otl.buildAttachList(points, font.getReverseGlyphMap()) def parseCaretList(lines, font): carets = {} with lines.between("carets"): for line in lines: glyph = makeGlyph(line[0]) assert glyph not in carets, glyph num = int(line[1]) thisCarets = [int(i) for i in line[2:]] assert num == len(thisCarets), line carets[glyph] = thisCarets return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap()) def makeMarkFilteringSets(sets, font): self = ot.MarkGlyphSetsDef() self.MarkSetTableFormat = 1 self.MarkSetCount = 1 + max(sets.keys()) self.Coverage = [None] * self.MarkSetCount for k, v in sorted(sets.items()): self.Coverage[k] = makeCoverage(set(v), font) return self def parseMarkFilteringSets(lines, font): sets = {} with lines.between("set definition"): for line in lines: assert len(line) == 2, line glyph = makeGlyph(line[0]) # TODO accept set names st = int(line[1]) if st not in sets: sets[st] = [] sets[st].append(glyph) return makeMarkFilteringSets(sets, font) def parseGDEF(lines, font): 
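# The GDEF text format parsed below is section-based: "class definition
# begin", "attachment list begin", "carets begin", "mark attachment class
# definition begin" and "markfilter set definition begin" each populate one
# field of the GDEF table via the fields dict that follows.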
container = ttLib.getTableClass("GDEF")() log.debug("Parsing GDEF") self = ot.GDEF() fields = { "class definition begin": ( "GlyphClassDef", lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef), ), "attachment list begin": ("AttachList", parseAttachList), "carets begin": ("LigCaretList", parseCaretList), "mark attachment class definition begin": ( "MarkAttachClassDef", lambda lines, font: parseClassDef(lines, font, klass=ot.MarkAttachClassDef), ), "markfilter set definition begin": ("MarkGlyphSetsDef", parseMarkFilteringSets), } for attr, parser in fields.values(): setattr(self, attr, None) while lines.peek() is not None: typ = lines.peek()[0].lower() if typ not in fields: log.debug("Skipping %s", typ) next(lines) continue attr, parser = fields[typ] assert getattr(self, attr) is None, attr setattr(self, attr, parser(lines, font)) self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002 container.table = self return container def parseCmap(lines, font): container = ttLib.getTableClass("cmap")() log.debug("Parsing cmap") tables = [] while lines.peek() is not None: lines.expect("cmap subtable %d" % len(tables)) platId, encId, fmt, lang = [ parseCmapId(lines, field) for field in ("platformID", "encodingID", "format", "language") ] table = cmap_classes[fmt](fmt) table.platformID = platId table.platEncID = encId table.language = lang table.cmap = {} line = next(lines) while line[0] != "end subtable": table.cmap[int(line[0], 16)] = line[1] line = next(lines) tables.append(table) container.tableVersion = 0 container.tables = tables return container def parseCmapId(lines, field): line = next(lines) assert field == line[0] return int(line[1]) def parseTable(lines, font, tableTag=None): log.debug("Parsing table") line = lines.peeks() tag = None if line[0].split()[0] == "FontDame": tag = line[0].split()[1] elif " ".join(line[0].split()[:3]) == "Font Chef Table": tag = line[0].split()[3] if tag is not None: next(lines) tag = tag.ljust(4) if tableTag is None: tableTag = tag else: assert tableTag == tag, (tableTag, tag) assert ( tableTag is not None ), "Don't know what table to parse and data doesn't specify" return { "GSUB": parseGSUB, "GPOS": parseGPOS, "GDEF": parseGDEF, "cmap": parseCmap, }[tableTag](lines, font) class Tokenizer(object): def __init__(self, f): # TODO BytesIO / StringIO as needed? 
also, figure out whether we work on bytes or unicode lines = iter(f) try: self.filename = f.name except: self.filename = None self.lines = iter(lines) self.line = "" self.lineno = 0 self.stoppers = [] self.buffer = None def __iter__(self): return self def _next_line(self): self.lineno += 1 line = self.line = next(self.lines) line = [s.strip() for s in line.split("\t")] if len(line) == 1 and not line[0]: del line[0] if line and not line[-1]: log.warning("trailing tab found on line %d: %s" % (self.lineno, self.line)) while line and not line[-1]: del line[-1] return line def _next_nonempty(self): while True: line = self._next_line() # Skip comments and empty lines if line and line[0] and (line[0][0] != "%" or line[0] == "% subtable"): return line def _next_buffered(self): if self.buffer: ret = self.buffer self.buffer = None return ret else: return self._next_nonempty() def __next__(self): line = self._next_buffered() if line[0].lower() in self.stoppers: self.buffer = line raise StopIteration return line def next(self): return self.__next__() def peek(self): if not self.buffer: try: self.buffer = self._next_nonempty() except StopIteration: return None if self.buffer[0].lower() in self.stoppers: return None return self.buffer def peeks(self): ret = self.peek() return ret if ret is not None else ("",) @contextmanager def between(self, tag): start = tag + " begin" end = tag + " end" self.expectendswith(start) self.stoppers.append(end) yield del self.stoppers[-1] self.expect(tag + " end") @contextmanager def until(self, tags): if type(tags) is not tuple: tags = (tags,) self.stoppers.extend(tags) yield del self.stoppers[-len(tags) :] def expect(self, s): line = next(self) tag = line[0].lower() assert tag == s, "Expected '%s', got '%s'" % (s, tag) return line def expectendswith(self, s): line = next(self) tag = line[0].lower() assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag) return line def build(f, font, tableTag=None): """Convert a Monotype font layout file to an OpenType layout object A font object must be passed, but this may be a "dummy" font; it is only used for sorting glyph sets when making coverage tables and to hold the OpenType layout table while it is being built. Args: f: A file object. font (TTFont): A font object. tableTag (string): If provided, asserts that the file contains data for the given OpenType table. Returns: An object representing the table. (e.g. ``table_G_S_U_B_``) """ lines = Tokenizer(f) return parseTable(lines, font, tableTag=tableTag) def main(args=None, font=None): """Convert a FontDame OTL file to TTX XML Writes XML output to stdout. Args: args: Command line arguments (``--font``, ``--table``, input files). 
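Example (an illustrative invocation; the file names are placeholders)::

    $ fonttools mtiLib --font MyFont.ttf --table GSUB my_gsub.txt > my_gsub.ttx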
""" import sys from fontTools import configLogger from fontTools.misc.testTools import MockFont if args is None: args = sys.argv[1:] # configure the library logger (for >= WARNING) configLogger() # comment this out to enable debug messages from mtiLib's logger # log.setLevel(logging.DEBUG) import argparse parser = argparse.ArgumentParser( "fonttools mtiLib", description=main.__doc__, ) parser.add_argument( "--font", "-f", metavar="FILE", dest="font", help="Input TTF files (used for glyph classes and sorting coverage tables)", ) parser.add_argument( "--table", "-t", metavar="TABLE", dest="tableTag", help="Table to fill (sniffed from input file if not provided)", ) parser.add_argument( "inputs", metavar="FILE", type=str, nargs="+", help="Input FontDame .txt files" ) args = parser.parse_args(args) if font is None: if args.font: font = ttLib.TTFont(args.font) else: font = MockFont() for f in args.inputs: log.debug("Processing %s", f) with open(f, "rt", encoding="utf-8") as f: table = build(f, font, tableTag=args.tableTag) blob = table.compile(font) # Make sure it compiles decompiled = table.__class__() decompiled.decompile(blob, font) # Make sure it decompiles! # continue from fontTools.misc import xmlWriter tag = table.tableTag writer = xmlWriter.XMLWriter(sys.stdout) writer.begintag(tag) writer.newline() # table.toXML(writer, font) decompiled.toXML(writer, font) writer.endtag(tag) writer.newline() if __name__ == "__main__": import sys sys.exit(main()) PKaZZZ���^^fontTools/mtiLib/__main__.pyimport sys from fontTools.mtiLib import main if __name__ == "__main__": sys.exit(main()) PKaZZZ��zf--fontTools/otlLib/__init__.py"""OpenType Layout-related functionality.""" PKaZZZ���l����fontTools/otlLib/builder.pyfrom collections import namedtuple, OrderedDict import os from fontTools.misc.fixedTools import fixedToFloat from fontTools.misc.roundTools import otRound from fontTools import ttLib from fontTools.ttLib.tables import otTables as ot from fontTools.ttLib.tables.otBase import ( ValueRecord, valueRecordFormatDict, OTLOffsetOverflowError, OTTableWriter, CountReference, ) from fontTools.ttLib.tables import otBase from fontTools.feaLib.ast import STATNameStatement from fontTools.otlLib.optimize.gpos import ( _compression_level_from_env, compact_lookup, ) from fontTools.otlLib.error import OpenTypeLibError from functools import reduce import logging import copy log = logging.getLogger(__name__) def buildCoverage(glyphs, glyphMap): """Builds a coverage table. Coverage tables (as defined in the `OpenType spec <https://docs.microsoft.com/en-gb/typography/opentype/spec/chapter2#coverage-table>`__) are used in all OpenType Layout lookups apart from the Extension type, and define the glyphs involved in a layout subtable. This allows shaping engines to compare the glyph stream with the coverage table and quickly determine whether a subtable should be involved in a shaping operation. This function takes a list of glyphs and a glyphname-to-ID map, and returns a ``Coverage`` object representing the coverage table. Example:: glyphMap = font.getReverseGlyphMap() glyphs = [ "A", "B", "C" ] coverage = buildCoverage(glyphs, glyphMap) Args: glyphs: a sequence of glyph names. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: An ``otTables.Coverage`` object or ``None`` if there are no glyphs supplied. 
""" if not glyphs: return None self = ot.Coverage() try: self.glyphs = sorted(set(glyphs), key=glyphMap.__getitem__) except KeyError as e: raise ValueError(f"Could not find glyph {e} in font") from e return self LOOKUP_FLAG_RIGHT_TO_LEFT = 0x0001 LOOKUP_FLAG_IGNORE_BASE_GLYPHS = 0x0002 LOOKUP_FLAG_IGNORE_LIGATURES = 0x0004 LOOKUP_FLAG_IGNORE_MARKS = 0x0008 LOOKUP_FLAG_USE_MARK_FILTERING_SET = 0x0010 def buildLookup(subtables, flags=0, markFilterSet=None): """Turns a collection of rules into a lookup. A Lookup (as defined in the `OpenType Spec <https://docs.microsoft.com/en-gb/typography/opentype/spec/chapter2#lookupTbl>`__) wraps the individual rules in a layout operation (substitution or positioning) in a data structure expressing their overall lookup type - for example, single substitution, mark-to-base attachment, and so on - as well as the lookup flags and any mark filtering sets. You may import the following constants to express lookup flags: - ``LOOKUP_FLAG_RIGHT_TO_LEFT`` - ``LOOKUP_FLAG_IGNORE_BASE_GLYPHS`` - ``LOOKUP_FLAG_IGNORE_LIGATURES`` - ``LOOKUP_FLAG_IGNORE_MARKS`` - ``LOOKUP_FLAG_USE_MARK_FILTERING_SET`` Args: subtables: A list of layout subtable objects (e.g. ``MultipleSubst``, ``PairPos``, etc.) or ``None``. flags (int): This lookup's flags. markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. Returns: An ``otTables.Lookup`` object or ``None`` if there are no subtables supplied. """ if subtables is None: return None subtables = [st for st in subtables if st is not None] if not subtables: return None assert all( t.LookupType == subtables[0].LookupType for t in subtables ), "all subtables must have the same LookupType; got %s" % repr( [t.LookupType for t in subtables] ) self = ot.Lookup() self.LookupType = subtables[0].LookupType self.LookupFlag = flags self.SubTable = subtables self.SubTableCount = len(self.SubTable) if markFilterSet is not None: self.LookupFlag |= LOOKUP_FLAG_USE_MARK_FILTERING_SET assert isinstance(markFilterSet, int), markFilterSet self.MarkFilteringSet = markFilterSet else: assert (self.LookupFlag & LOOKUP_FLAG_USE_MARK_FILTERING_SET) == 0, ( "if markFilterSet is None, flags must not set " "LOOKUP_FLAG_USE_MARK_FILTERING_SET; flags=0x%04x" % flags ) return self class LookupBuilder(object): SUBTABLE_BREAK_ = "SUBTABLE_BREAK" def __init__(self, font, location, table, lookup_type): self.font = font self.glyphMap = font.getReverseGlyphMap() self.location = location self.table, self.lookup_type = table, lookup_type self.lookupflag = 0 self.markFilterSet = None self.lookup_index = None # assigned when making final tables assert table in ("GPOS", "GSUB") def equals(self, other): return ( isinstance(other, self.__class__) and self.table == other.table and self.lookupflag == other.lookupflag and self.markFilterSet == other.markFilterSet ) def inferGlyphClasses(self): """Infers glyph glasses for the GDEF table, such as {"cedilla":3}.""" return {} def getAlternateGlyphs(self): """Helper for building 'aalt' features.""" return {} def buildLookup_(self, subtables): return buildLookup(subtables, self.lookupflag, self.markFilterSet) def buildMarkClasses_(self, marks): """{"cedilla": ("BOTTOM", ast.Anchor), ...} --> {"BOTTOM":0, "TOP":1} Helper for MarkBasePostBuilder, MarkLigPosBuilder, and MarkMarkPosBuilder. 
Seems to return the same numeric IDs for mark classes as the AFDKO makeotf tool. """ ids = {} for mark in sorted(marks.keys(), key=self.font.getGlyphID): markClassName, _markAnchor = marks[mark] if markClassName not in ids: ids[markClassName] = len(ids) return ids def setBacktrackCoverage_(self, prefix, subtable): subtable.BacktrackGlyphCount = len(prefix) subtable.BacktrackCoverage = [] for p in reversed(prefix): coverage = buildCoverage(p, self.glyphMap) subtable.BacktrackCoverage.append(coverage) def setLookAheadCoverage_(self, suffix, subtable): subtable.LookAheadGlyphCount = len(suffix) subtable.LookAheadCoverage = [] for s in suffix: coverage = buildCoverage(s, self.glyphMap) subtable.LookAheadCoverage.append(coverage) def setInputCoverage_(self, glyphs, subtable): subtable.InputGlyphCount = len(glyphs) subtable.InputCoverage = [] for g in glyphs: coverage = buildCoverage(g, self.glyphMap) subtable.InputCoverage.append(coverage) def setCoverage_(self, glyphs, subtable): subtable.GlyphCount = len(glyphs) subtable.Coverage = [] for g in glyphs: coverage = buildCoverage(g, self.glyphMap) subtable.Coverage.append(coverage) def build_subst_subtables(self, mapping, klass): substitutions = [{}] for key in mapping: if key[0] == self.SUBTABLE_BREAK_: substitutions.append({}) else: substitutions[-1][key] = mapping[key] subtables = [klass(s) for s in substitutions] return subtables def add_subtable_break(self, location): """Add an explicit subtable break. Args: location: A string or tuple representing the location in the original source which produced this break, or ``None`` if no location is provided. """ log.warning( OpenTypeLibError( 'unsupported "subtable" statement for lookup type', location ) ) class AlternateSubstBuilder(LookupBuilder): """Builds an Alternate Substitution (GSUB3) lookup. Users are expected to manually add alternate glyph substitutions to the ``alternates`` attribute after the object has been initialized, e.g.:: builder.alternates["A"] = ["A.alt1", "A.alt2"] Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. alternates: An ordered dictionary of alternates, mapping glyph names to a list of names of alternates. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GSUB", 3) self.alternates = OrderedDict() def equals(self, other): return LookupBuilder.equals(self, other) and self.alternates == other.alternates def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the alternate substitution lookup. 
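Example::

    # A sketch of standalone use, assuming "font" is a TTFont; normally
    # feaLib drives this builder.
    builder = AlternateSubstBuilder(font, None)
    builder.alternates["A"] = ["A.alt1", "A.alt2"]
    lookup = builder.build()  # an otTables.Lookup with LookupType 3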
""" subtables = self.build_subst_subtables( self.alternates, buildAlternateSubstSubtable ) return self.buildLookup_(subtables) def getAlternateGlyphs(self): return self.alternates def add_subtable_break(self, location): self.alternates[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_ class ChainContextualRule( namedtuple("ChainContextualRule", ["prefix", "glyphs", "suffix", "lookups"]) ): @property def is_subtable_break(self): return self.prefix == LookupBuilder.SUBTABLE_BREAK_ class ChainContextualRuleset: def __init__(self): self.rules = [] def addRule(self, rule): self.rules.append(rule) @property def hasPrefixOrSuffix(self): # Do we have any prefixes/suffixes? If this is False for all # rulesets, we can express the whole lookup as GPOS5/GSUB7. for rule in self.rules: if len(rule.prefix) > 0 or len(rule.suffix) > 0: return True return False @property def hasAnyGlyphClasses(self): # Do we use glyph classes anywhere in the rules? If this is False # we can express this subtable as a Format 1. for rule in self.rules: for coverage in (rule.prefix, rule.glyphs, rule.suffix): if any(len(x) > 1 for x in coverage): return True return False def format2ClassDefs(self): PREFIX, GLYPHS, SUFFIX = 0, 1, 2 classDefBuilders = [] for ix in [PREFIX, GLYPHS, SUFFIX]: context = [] for r in self.rules: context.append(r[ix]) classes = self._classBuilderForContext(context) if not classes: return None classDefBuilders.append(classes) return classDefBuilders def _classBuilderForContext(self, context): classdefbuilder = ClassDefBuilder(useClass0=False) for position in context: for glyphset in position: glyphs = set(glyphset) if not classdefbuilder.canAdd(glyphs): return None classdefbuilder.add(glyphs) return classdefbuilder class ChainContextualBuilder(LookupBuilder): def equals(self, other): return LookupBuilder.equals(self, other) and self.rules == other.rules def rulesets(self): # Return a list of ChainContextRuleset objects, taking explicit # subtable breaks into account ruleset = [ChainContextualRuleset()] for rule in self.rules: if rule.is_subtable_break: ruleset.append(ChainContextualRuleset()) continue ruleset[-1].addRule(rule) # Squish any empty subtables return [x for x in ruleset if len(x.rules) > 0] def getCompiledSize_(self, subtables): if not subtables: return 0 # We need to make a copy here because compiling # modifies the subtable (finalizing formats etc.) table = self.buildLookup_(copy.deepcopy(subtables)) w = OTTableWriter() table.compile(w, self.font) size = len(w.getAllData()) return size def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the chained contextual positioning lookup. """ subtables = [] rulesets = self.rulesets() chaining = any(ruleset.hasPrefixOrSuffix for ruleset in rulesets) # https://github.com/fonttools/fonttools/issues/2539 # # Unfortunately, as of 2022-03-07, Apple's CoreText renderer does not # correctly process GPOS7 lookups, so for now we force contextual # positioning lookups to be chaining (GPOS8). # # This seems to be fixed as of macOS 13.2, but we keep disabling this # for now until we are no longer concerned about old macOS versions. # But we allow people to opt-out of this with the config key below. write_gpos7 = self.font.cfg.get("fontTools.otlLib.builder:WRITE_GPOS7") # horrible separation of concerns breach if not write_gpos7 and self.subtable_type == "Pos": chaining = True for ruleset in rulesets: # Determine format strategy. We try to build formats 1, 2 and 3 # subtables and then work out which is best. 
candidates list holds # the subtables in each format for this ruleset (including a dummy # "format 0" to make the addressing match the format numbers). # We can always build a format 3 lookup by accumulating each of # the rules into a list, so start with that. candidates = [None, None, None, []] for rule in ruleset.rules: candidates[3].append(self.buildFormat3Subtable(rule, chaining)) # Can we express the whole ruleset as a format 2 subtable? classdefs = ruleset.format2ClassDefs() if classdefs: candidates[2] = [ self.buildFormat2Subtable(ruleset, classdefs, chaining) ] if not ruleset.hasAnyGlyphClasses: candidates[1] = [self.buildFormat1Subtable(ruleset, chaining)] candidates_by_size = [] for i in [1, 2, 3]: if candidates[i]: try: size = self.getCompiledSize_(candidates[i]) except OTLOffsetOverflowError as e: log.warning( "Contextual format %i at %s overflowed (%s)" % (i, str(self.location), e) ) else: candidates_by_size.append((size, candidates[i])) if not candidates_by_size: raise OpenTypeLibError("All candidates overflowed", self.location) _min_size, winner = min(candidates_by_size, key=lambda x: x[0]) subtables.extend(winner) # If we are not chaining, lookup type will be automatically fixed by # buildLookup_ return self.buildLookup_(subtables) def buildFormat1Subtable(self, ruleset, chaining=True): st = self.newSubtable_(chaining=chaining) st.Format = 1 st.populateDefaults() coverage = set() rulesetsByFirstGlyph = {} ruleAttr = self.ruleAttr_(format=1, chaining=chaining) for rule in ruleset.rules: ruleAsSubtable = self.newRule_(format=1, chaining=chaining) if chaining: ruleAsSubtable.BacktrackGlyphCount = len(rule.prefix) ruleAsSubtable.LookAheadGlyphCount = len(rule.suffix) ruleAsSubtable.Backtrack = [list(x)[0] for x in reversed(rule.prefix)] ruleAsSubtable.LookAhead = [list(x)[0] for x in rule.suffix] ruleAsSubtable.InputGlyphCount = len(rule.glyphs) else: ruleAsSubtable.GlyphCount = len(rule.glyphs) ruleAsSubtable.Input = [list(x)[0] for x in rule.glyphs[1:]] self.buildLookupList(rule, ruleAsSubtable) firstGlyph = list(rule.glyphs[0])[0] if firstGlyph not in rulesetsByFirstGlyph: coverage.add(firstGlyph) rulesetsByFirstGlyph[firstGlyph] = [] rulesetsByFirstGlyph[firstGlyph].append(ruleAsSubtable) st.Coverage = buildCoverage(coverage, self.glyphMap) ruleSets = [] for g in st.Coverage.glyphs: ruleSet = self.newRuleSet_(format=1, chaining=chaining) setattr(ruleSet, ruleAttr, rulesetsByFirstGlyph[g]) setattr(ruleSet, f"{ruleAttr}Count", len(rulesetsByFirstGlyph[g])) ruleSets.append(ruleSet) setattr(st, self.ruleSetAttr_(format=1, chaining=chaining), ruleSets) setattr( st, self.ruleSetAttr_(format=1, chaining=chaining) + "Count", len(ruleSets) ) return st def buildFormat2Subtable(self, ruleset, classdefs, chaining=True): st = self.newSubtable_(chaining=chaining) st.Format = 2 st.populateDefaults() if chaining: ( st.BacktrackClassDef, st.InputClassDef, st.LookAheadClassDef, ) = [c.build() for c in classdefs] else: st.ClassDef = classdefs[1].build() inClasses = classdefs[1].classes() classSets = [] for _ in inClasses: classSet = self.newRuleSet_(format=2, chaining=chaining) classSets.append(classSet) coverage = set() classRuleAttr = self.ruleAttr_(format=2, chaining=chaining) for rule in ruleset.rules: ruleAsSubtable = self.newRule_(format=2, chaining=chaining) if chaining: ruleAsSubtable.BacktrackGlyphCount = len(rule.prefix) ruleAsSubtable.LookAheadGlyphCount = len(rule.suffix) # The glyphs in the rule may be list, tuple, odict_keys... 
# Order is not important anyway because they are guaranteed # to be members of the same class. ruleAsSubtable.Backtrack = [ st.BacktrackClassDef.classDefs[list(x)[0]] for x in reversed(rule.prefix) ] ruleAsSubtable.LookAhead = [ st.LookAheadClassDef.classDefs[list(x)[0]] for x in rule.suffix ] ruleAsSubtable.InputGlyphCount = len(rule.glyphs) ruleAsSubtable.Input = [ st.InputClassDef.classDefs[list(x)[0]] for x in rule.glyphs[1:] ] setForThisRule = classSets[ st.InputClassDef.classDefs[list(rule.glyphs[0])[0]] ] else: ruleAsSubtable.GlyphCount = len(rule.glyphs) ruleAsSubtable.Class = [ # The spec calls this InputSequence st.ClassDef.classDefs[list(x)[0]] for x in rule.glyphs[1:] ] setForThisRule = classSets[ st.ClassDef.classDefs[list(rule.glyphs[0])[0]] ] self.buildLookupList(rule, ruleAsSubtable) coverage |= set(rule.glyphs[0]) getattr(setForThisRule, classRuleAttr).append(ruleAsSubtable) setattr( setForThisRule, f"{classRuleAttr}Count", getattr(setForThisRule, f"{classRuleAttr}Count") + 1, ) setattr(st, self.ruleSetAttr_(format=2, chaining=chaining), classSets) setattr( st, self.ruleSetAttr_(format=2, chaining=chaining) + "Count", len(classSets) ) st.Coverage = buildCoverage(coverage, self.glyphMap) return st def buildFormat3Subtable(self, rule, chaining=True): st = self.newSubtable_(chaining=chaining) st.Format = 3 if chaining: self.setBacktrackCoverage_(rule.prefix, st) self.setLookAheadCoverage_(rule.suffix, st) self.setInputCoverage_(rule.glyphs, st) else: self.setCoverage_(rule.glyphs, st) self.buildLookupList(rule, st) return st def buildLookupList(self, rule, st): for sequenceIndex, lookupList in enumerate(rule.lookups): if lookupList is not None: if not isinstance(lookupList, list): # Can happen with synthesised lookups lookupList = [lookupList] for l in lookupList: if l.lookup_index is None: if isinstance(self, ChainContextPosBuilder): other = "substitution" else: other = "positioning" raise OpenTypeLibError( "Missing index of the specified " f"lookup, might be a {other} lookup", self.location, ) rec = self.newLookupRecord_(st) rec.SequenceIndex = sequenceIndex rec.LookupListIndex = l.lookup_index def add_subtable_break(self, location): self.rules.append( ChainContextualRule( self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_, [self.SUBTABLE_BREAK_], ) ) def newSubtable_(self, chaining=True): subtablename = f"Context{self.subtable_type}" if chaining: subtablename = "Chain" + subtablename st = getattr(ot, subtablename)() # ot.ChainContextPos()/ot.ChainSubst()/etc. setattr(st, f"{self.subtable_type}Count", 0) setattr(st, f"{self.subtable_type}LookupRecord", []) return st # Format 1 and format 2 GSUB5/GSUB6/GPOS7/GPOS8 rulesets and rules form a family: # # format 1 ruleset format 1 rule format 2 ruleset format 2 rule # GSUB5 SubRuleSet SubRule SubClassSet SubClassRule # GSUB6 ChainSubRuleSet ChainSubRule ChainSubClassSet ChainSubClassRule # GPOS7 PosRuleSet PosRule PosClassSet PosClassRule # GPOS8 ChainPosRuleSet ChainPosRule ChainPosClassSet ChainPosClassRule # # The following functions generate the attribute names and subtables according # to this naming convention. def ruleSetAttr_(self, format=1, chaining=True): if format == 1: formatType = "Rule" elif format == 2: formatType = "Class" else: raise AssertionError(format) subtablename = f"{self.subtable_type[0:3]}{formatType}Set" # Sub, not Subst.
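# For example (see the naming table above): a substitution builder has
# subtable_type == "Subst", so format=2 yields "SubClassSet" here, which the
# chaining branch below turns into "ChainSubClassSet" (GSUB6).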
if chaining: subtablename = "Chain" + subtablename return subtablename def ruleAttr_(self, format=1, chaining=True): if format == 1: formatType = "" elif format == 2: formatType = "Class" else: raise AssertionError(format) subtablename = f"{self.subtable_type[0:3]}{formatType}Rule" # Sub, not Subst. if chaining: subtablename = "Chain" + subtablename return subtablename def newRuleSet_(self, format=1, chaining=True): st = getattr( ot, self.ruleSetAttr_(format, chaining) )() # ot.ChainPosRuleSet()/ot.SubRuleSet()/etc. st.populateDefaults() return st def newRule_(self, format=1, chaining=True): st = getattr( ot, self.ruleAttr_(format, chaining) )() # ot.ChainPosClassRule()/ot.SubClassRule()/etc. st.populateDefaults() return st def attachSubtableWithCount_( self, st, subtable_name, count_name, existing=None, index=None, chaining=False ): if chaining: subtable_name = "Chain" + subtable_name count_name = "Chain" + count_name if not hasattr(st, count_name): setattr(st, count_name, 0) setattr(st, subtable_name, []) if existing: new_subtable = existing else: # Create a new, empty subtable from otTables new_subtable = getattr(ot, subtable_name)() setattr(st, count_name, getattr(st, count_name) + 1) if index: getattr(st, subtable_name).insert(index, new_subtable) else: getattr(st, subtable_name).append(new_subtable) return new_subtable def newLookupRecord_(self, st): return self.attachSubtableWithCount_( st, f"{self.subtable_type}LookupRecord", f"{self.subtable_type}Count", chaining=False, ) # Oddly, it isn't ChainSubstLookupRecord class ChainContextPosBuilder(ChainContextualBuilder): """Builds a Chained Contextual Positioning (GPOS8) lookup. Users are expected to manually add rules to the ``rules`` attribute after the object has been initialized, e.g.:: # pos [A B] [C D] x' lookup lu1 y' z' lookup lu2 E; prefix = [ ["A", "B"], ["C", "D"] ] suffix = [ ["E"] ] glyphs = [ ["x"], ["y"], ["z"] ] lookups = [ [lu1], None, [lu2] ] builder.rules.append( (prefix, glyphs, suffix, lookups) ) Attributes: font (``fontTools.TTLib.TTFont``): A font object.
location: A string or tuple representing the location in the original source which produced this lookup. rules: A list of tuples representing the rules in this lookup. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GSUB", 6) self.rules = [] # (prefix, input, suffix, lookups) self.subtable_type = "Subst" def getAlternateGlyphs(self): result = {} for rule in self.rules: if rule.is_subtable_break: continue for lookups in rule.lookups: if not isinstance(lookups, list): lookups = [lookups] for lookup in lookups: if lookup is not None: alts = lookup.getAlternateGlyphs() for glyph, replacements in alts.items(): alts_for_glyph = result.setdefault(glyph, []) alts_for_glyph.extend( g for g in replacements if g not in alts_for_glyph ) return result def find_chainable_single_subst(self, mapping): """Helper for add_single_subst_chained_()""" res = None for rule in self.rules[::-1]: if rule.is_subtable_break: return res for sub in rule.lookups: if isinstance(sub, SingleSubstBuilder) and not any( g in mapping and mapping[g] != sub.mapping[g] for g in sub.mapping ): res = sub return res class LigatureSubstBuilder(LookupBuilder): """Builds a Ligature Substitution (GSUB4) lookup. Users are expected to manually add ligatures to the ``ligatures`` attribute after the object has been initialized, e.g.:: # sub f i by f_i; builder.ligatures[("f","f","i")] = "f_f_i" Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. ligatures: An ordered dictionary mapping a tuple of glyph names to the ligature glyphname. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GSUB", 4) self.ligatures = OrderedDict() # {('f','f','i'): 'f_f_i'} def equals(self, other): return LookupBuilder.equals(self, other) and self.ligatures == other.ligatures def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the ligature substitution lookup. """ subtables = self.build_subst_subtables( self.ligatures, buildLigatureSubstSubtable ) return self.buildLookup_(subtables) def add_subtable_break(self, location): self.ligatures[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_ class MultipleSubstBuilder(LookupBuilder): """Builds a Multiple Substitution (GSUB2) lookup. Users are expected to manually add substitutions to the ``mapping`` attribute after the object has been initialized, e.g.:: # sub uni06C0 by uni06D5.fina hamza.above; builder.mapping["uni06C0"] = [ "uni06D5.fina", "hamza.above"] Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. mapping: An ordered dictionary mapping a glyph name to a list of substituted glyph names. 
lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GSUB", 2) self.mapping = OrderedDict() def equals(self, other): return LookupBuilder.equals(self, other) and self.mapping == other.mapping def build(self): subtables = self.build_subst_subtables(self.mapping, buildMultipleSubstSubtable) return self.buildLookup_(subtables) def add_subtable_break(self, location): self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_ class CursivePosBuilder(LookupBuilder): """Builds a Cursive Positioning (GPOS3) lookup. Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. attachments: An ordered dictionary mapping a glyph name to a two-element tuple of ``otTables.Anchor`` objects. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GPOS", 3) self.attachments = {} def equals(self, other): return ( LookupBuilder.equals(self, other) and self.attachments == other.attachments ) def add_attachment(self, location, glyphs, entryAnchor, exitAnchor): """Adds attachment information to the cursive positioning lookup. Args: location: A string or tuple representing the location in the original source which produced this lookup. (Unused.) glyphs: A list of glyph names sharing these entry and exit anchor locations. entryAnchor: A ``otTables.Anchor`` object representing the entry anchor, or ``None`` if no entry anchor is present. exitAnchor: A ``otTables.Anchor`` object representing the exit anchor, or ``None`` if no exit anchor is present. """ for glyph in glyphs: self.attachments[glyph] = (entryAnchor, exitAnchor) def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the cursive positioning lookup. """ st = buildCursivePosSubtable(self.attachments, self.glyphMap) return self.buildLookup_([st]) class MarkBasePosBuilder(LookupBuilder): """Builds a Mark-To-Base Positioning (GPOS4) lookup. Users are expected to manually add marks and bases to the ``marks`` and ``bases`` attributes after the object has been initialized, e.g.:: builder.marks["acute"] = (0, a1) builder.marks["grave"] = (0, a1) builder.marks["cedilla"] = (1, a2) builder.bases["a"] = {0: a3, 1: a5} builder.bases["b"] = {0: a4, 1: a5} Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. marks: A dictionary mapping a glyph name to a two-element tuple containing a mark class ID and an ``otTables.Anchor`` object. bases: A dictionary mapping a glyph name to a dictionary mapping mark class IDs to ``otTables.Anchor`` objects. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup.
If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GPOS", 4) self.marks = {} # glyphName -> (markClassName, anchor) self.bases = {} # glyphName -> {markClassName: anchor} def equals(self, other): return ( LookupBuilder.equals(self, other) and self.marks == other.marks and self.bases == other.bases ) def inferGlyphClasses(self): result = {glyph: 1 for glyph in self.bases} result.update({glyph: 3 for glyph in self.marks}) return result def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the mark-to-base positioning lookup. """ markClasses = self.buildMarkClasses_(self.marks) marks = {} for mark, (mc, anchor) in self.marks.items(): if mc not in markClasses: raise ValueError( "Mark class %s not found for mark glyph %s" % (mc, mark) ) marks[mark] = (markClasses[mc], anchor) bases = {} for glyph, anchors in self.bases.items(): bases[glyph] = {} for mc, anchor in anchors.items(): if mc not in markClasses: raise ValueError( "Mark class %s not found for base glyph %s" % (mc, glyph) ) bases[glyph][markClasses[mc]] = anchor subtables = buildMarkBasePos(marks, bases, self.glyphMap) return self.buildLookup_(subtables) class MarkLigPosBuilder(LookupBuilder): """Builds a Mark-To-Ligature Positioning (GPOS5) lookup. Users are expected to manually add marks and bases to the ``marks`` and ``ligatures`` attributes after the object has been initialized, e.g.:: builder.marks["acute"] = (0, a1) builder.marks["grave"] = (0, a1) builder.marks["cedilla"] = (1, a2) builder.ligatures["f_i"] = [ { 0: a3, 1: a5 }, # f { 0: a4, 1: a5 } # i ] Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. marks: A dictionary mapping a glyph name to a two-element tuple containing a mark class ID and an ``otTables.Anchor`` object. ligatures: A dictionary mapping a glyph name to an array with one element for each ligature component. Each array element should be a dictionary mapping mark class IDs to ``otTables.Anchor`` objects. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GPOS", 5) self.marks = {} # glyphName -> (markClassName, anchor) self.ligatures = {} # glyphName -> [{markClassName: anchor}, ...] def equals(self, other): return ( LookupBuilder.equals(self, other) and self.marks == other.marks and self.ligatures == other.ligatures ) def inferGlyphClasses(self): result = {glyph: 2 for glyph in self.ligatures} result.update({glyph: 3 for glyph in self.marks}) return result def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the mark-to-ligature positioning lookup.
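Example::

    # A sketch, assuming the marks/ligatures were populated as in the class
    # docstring above (a1..a5 being otTables.Anchor objects):
    lookup = builder.build()  # an otTables.Lookup with LookupType 5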
""" markClasses = self.buildMarkClasses_(self.marks) marks = { mark: (markClasses[mc], anchor) for mark, (mc, anchor) in self.marks.items() } ligs = {} for lig, components in self.ligatures.items(): ligs[lig] = [] for c in components: ligs[lig].append({markClasses[mc]: a for mc, a in c.items()}) subtables = buildMarkLigPos(marks, ligs, self.glyphMap) return self.buildLookup_(subtables) class MarkMarkPosBuilder(LookupBuilder): """Builds a Mark-To-Mark Positioning (GPOS6) lookup. Users are expected to manually add marks and bases to the ``marks`` and ``baseMarks`` attributes after the object has been initialized, e.g.:: builder.marks["acute"] = (0, a1) builder.marks["grave"] = (0, a1) builder.marks["cedilla"] = (1, a2) builder.baseMarks["acute"] = {0: a3} Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. marks: An dictionary mapping a glyph name to a two-element tuple containing a mark class ID and ``otTables.Anchor`` object. baseMarks: An dictionary mapping a glyph name to a dictionary containing one item: a mark class ID and a ``otTables.Anchor`` object. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GPOS", 6) self.marks = {} # glyphName -> (markClassName, anchor) self.baseMarks = {} # glyphName -> {markClassName: anchor} def equals(self, other): return ( LookupBuilder.equals(self, other) and self.marks == other.marks and self.baseMarks == other.baseMarks ) def inferGlyphClasses(self): result = {glyph: 3 for glyph in self.baseMarks} result.update({glyph: 3 for glyph in self.marks}) return result def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the mark-to-mark positioning lookup. """ markClasses = self.buildMarkClasses_(self.marks) markClassList = sorted(markClasses.keys(), key=markClasses.get) marks = { mark: (markClasses[mc], anchor) for mark, (mc, anchor) in self.marks.items() } st = ot.MarkMarkPos() st.Format = 1 st.ClassCount = len(markClasses) st.Mark1Coverage = buildCoverage(marks, self.glyphMap) st.Mark2Coverage = buildCoverage(self.baseMarks, self.glyphMap) st.Mark1Array = buildMarkArray(marks, self.glyphMap) st.Mark2Array = ot.Mark2Array() st.Mark2Array.Mark2Count = len(st.Mark2Coverage.glyphs) st.Mark2Array.Mark2Record = [] for base in st.Mark2Coverage.glyphs: anchors = [self.baseMarks[base].get(mc) for mc in markClassList] st.Mark2Array.Mark2Record.append(buildMark2Record(anchors)) return self.buildLookup_([st]) class ReverseChainSingleSubstBuilder(LookupBuilder): """Builds a Reverse Chaining Contextual Single Substitution (GSUB8) lookup. Users are expected to manually add substitutions to the ``substitutions`` attribute after the object has been initialized, e.g.:: # reversesub [a e n] d' by d.alt; prefix = [ ["a", "e", "n"] ] suffix = [] mapping = { "d": "d.alt" } builder.substitutions.append( (prefix, suffix, mapping) ) Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. 
rules: A list of three-element tuples, each consisting of a prefix sequence, a suffix sequence, and a dictionary of single substitutions. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GSUB", 8) self.rules = [] # (prefix, suffix, mapping) def equals(self, other): return LookupBuilder.equals(self, other) and self.rules == other.rules def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the reverse chained contextual single substitution lookup. """ subtables = [] for prefix, suffix, mapping in self.rules: st = ot.ReverseChainSingleSubst() st.Format = 1 self.setBacktrackCoverage_(prefix, st) self.setLookAheadCoverage_(suffix, st) st.Coverage = buildCoverage(mapping.keys(), self.glyphMap) st.GlyphCount = len(mapping) st.Substitute = [mapping[g] for g in st.Coverage.glyphs] subtables.append(st) return self.buildLookup_(subtables) def add_subtable_break(self, location): # Nothing to do here, each substitution is in its own subtable. pass class SingleSubstBuilder(LookupBuilder): """Builds a Single Substitution (GSUB1) lookup. Users are expected to manually add substitutions to the ``mapping`` attribute after the object has been initialized, e.g.:: # sub x by y; builder.mapping["x"] = "y" Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. mapping: A dictionary mapping a single glyph name to another glyph name. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GSUB", 1) self.mapping = OrderedDict() def equals(self, other): return LookupBuilder.equals(self, other) and self.mapping == other.mapping def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the single substitution lookup. """ subtables = self.build_subst_subtables(self.mapping, buildSingleSubstSubtable) return self.buildLookup_(subtables) def getAlternateGlyphs(self): return {glyph: [repl] for glyph, repl in self.mapping.items()} def add_subtable_break(self, location): self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_ class ClassPairPosSubtableBuilder(object): """Builds class-based Pair Positioning (GPOS2 format 2) subtables. Note that this does *not* build a GPOS2 ``otTables.Lookup`` directly, but builds a list of ``otTables.PairPos`` subtables. It is used by the :class:`PairPosBuilder` below. Attributes: builder (PairPosBuilder): A pair positioning lookup builder. """ def __init__(self, builder): self.builder_ = builder self.classDef1_, self.classDef2_ = None, None self.values_ = {} # (glyphclass1, glyphclass2) --> (value1, value2) self.forceSubtableBreak_ = False self.subtables_ = [] def addPair(self, gc1, value1, gc2, value2): """Add a pair positioning rule. Args: gc1: A set of glyph names for the "left" glyph. value1: An ``otTables.ValueRecord`` object for the left glyph's positioning.
gc2: A set of glyph names for the "right" glyph value2: An ``otTables.ValueRecord`` object for the right glyph's positioning. """ mergeable = ( not self.forceSubtableBreak_ and self.classDef1_ is not None and self.classDef1_.canAdd(gc1) and self.classDef2_ is not None and self.classDef2_.canAdd(gc2) ) if not mergeable: self.flush_() self.classDef1_ = ClassDefBuilder(useClass0=True) self.classDef2_ = ClassDefBuilder(useClass0=False) self.values_ = {} self.classDef1_.add(gc1) self.classDef2_.add(gc2) self.values_[(gc1, gc2)] = (value1, value2) def addSubtableBreak(self): """Add an explicit subtable break at this point.""" self.forceSubtableBreak_ = True def subtables(self): """Return the list of ``otTables.PairPos`` subtables constructed.""" self.flush_() return self.subtables_ def flush_(self): if self.classDef1_ is None or self.classDef2_ is None: return st = buildPairPosClassesSubtable(self.values_, self.builder_.glyphMap) if st.Coverage is None: return self.subtables_.append(st) self.forceSubtableBreak_ = False class PairPosBuilder(LookupBuilder): """Builds a Pair Positioning (GPOS2) lookup. Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. pairs: An array of class-based pair positioning tuples. Usually manipulated with the :meth:`addClassPair` method below. glyphPairs: A dictionary mapping a tuple of glyph names to a tuple of ``otTables.ValueRecord`` objects. Usually manipulated with the :meth:`addGlyphPair` method below. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GPOS", 2) self.pairs = [] # [(gc1, value1, gc2, value2)*] self.glyphPairs = {} # (glyph1, glyph2) --> (value1, value2) self.locations = {} # (gc1, gc2) --> (filepath, line, column) def addClassPair(self, location, glyphclass1, value1, glyphclass2, value2): """Add a class pair positioning rule to the current lookup. Args: location: A string or tuple representing the location in the original source which produced this rule. Unused. glyphclass1: A set of glyph names for the "left" glyph in the pair. value1: A ``otTables.ValueRecord`` for positioning the left glyph. glyphclass2: A set of glyph names for the "right" glyph in the pair. value2: A ``otTables.ValueRecord`` for positioning the right glyph. """ self.pairs.append((glyphclass1, value1, glyphclass2, value2)) def addGlyphPair(self, location, glyph1, value1, glyph2, value2): """Add a glyph pair positioning rule to the current lookup. Args: location: A string or tuple representing the location in the original source which produced this rule. glyph1: A glyph name for the "left" glyph in the pair. value1: A ``otTables.ValueRecord`` for positioning the left glyph. glyph2: A glyph name for the "right" glyph in the pair. value2: A ``otTables.ValueRecord`` for positioning the right glyph. 
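Example::

    # A sketch: kern "A" against "V" by -60 units; the ValueRecords are
    # assumed to come from buildValue() in this module.
    builder.addGlyphPair(None, "A", buildValue({"XAdvance": -60}), "V", buildValue({}))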
""" key = (glyph1, glyph2) oldValue = self.glyphPairs.get(key, None) if oldValue is not None: # the Feature File spec explicitly allows specific pairs generated # by an 'enum' rule to be overridden by preceding single pairs otherLoc = self.locations[key] log.debug( "Already defined position for pair %s %s at %s; " "choosing the first value", glyph1, glyph2, otherLoc, ) else: self.glyphPairs[key] = (value1, value2) self.locations[key] = location def add_subtable_break(self, location): self.pairs.append( ( self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_, ) ) def equals(self, other): return ( LookupBuilder.equals(self, other) and self.glyphPairs == other.glyphPairs and self.pairs == other.pairs ) def build(self): """Build the lookup. Returns: An ``otTables.Lookup`` object representing the pair positioning lookup. """ builders = {} builder = ClassPairPosSubtableBuilder(self) for glyphclass1, value1, glyphclass2, value2 in self.pairs: if glyphclass1 is self.SUBTABLE_BREAK_: builder.addSubtableBreak() continue builder.addPair(glyphclass1, value1, glyphclass2, value2) subtables = [] if self.glyphPairs: subtables.extend(buildPairPosGlyphs(self.glyphPairs, self.glyphMap)) subtables.extend(builder.subtables()) lookup = self.buildLookup_(subtables) # Compact the lookup # This is a good moment to do it because the compaction should create # smaller subtables, which may prevent overflows from happening. # Keep reading the value from the ENV until ufo2ft switches to the config system level = self.font.cfg.get( "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL", default=_compression_level_from_env(), ) if level != 0: log.info("Compacting GPOS...") compact_lookup(self.font, level, lookup) return lookup class SinglePosBuilder(LookupBuilder): """Builds a Single Positioning (GPOS1) lookup. Attributes: font (``fontTools.TTLib.TTFont``): A font object. location: A string or tuple representing the location in the original source which produced this lookup. mapping: A dictionary mapping a glyph name to a ``otTables.ValueRecord`` objects. Usually manipulated with the :meth:`add_pos` method below. lookupflag (int): The lookup's flag markFilterSet: Either ``None`` if no mark filtering set is used, or an integer representing the filtering set to be used for this lookup. If a mark filtering set is provided, `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's flags. """ def __init__(self, font, location): LookupBuilder.__init__(self, font, location, "GPOS", 1) self.locations = {} # glyph -> (filename, line, column) self.mapping = {} # glyph -> ot.ValueRecord def add_pos(self, location, glyph, otValueRecord): """Add a single positioning rule. Args: location: A string or tuple representing the location in the original source which produced this lookup. glyph: A glyph name. otValueRection: A ``otTables.ValueRecord`` used to position the glyph. """ if not self.can_add(glyph, otValueRecord): otherLoc = self.locations[glyph] raise OpenTypeLibError( 'Already defined different position for glyph "%s" at %s' % (glyph, otherLoc), location, ) if otValueRecord: self.mapping[glyph] = otValueRecord self.locations[glyph] = location def can_add(self, glyph, value): assert isinstance(value, ValueRecord) curValue = self.mapping.get(glyph) return curValue is None or curValue == value def equals(self, other): return LookupBuilder.equals(self, other) and self.mapping == other.mapping def build(self): """Build the lookup. 
Returns: An ``otTables.Lookup`` object representing the single positioning lookup. """ subtables = buildSinglePos(self.mapping, self.glyphMap) return self.buildLookup_(subtables) # GSUB def buildSingleSubstSubtable(mapping): """Builds a single substitution (GSUB1) subtable. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.SingleSubstBuilder` instead. Args: mapping: A dictionary mapping input glyph names to output glyph names. Returns: An ``otTables.SingleSubst`` object, or ``None`` if the mapping dictionary is empty. """ if not mapping: return None self = ot.SingleSubst() self.mapping = dict(mapping) return self def buildMultipleSubstSubtable(mapping): """Builds a multiple substitution (GSUB2) subtable. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.MultipleSubstBuilder` instead. Example:: # sub uni06C0 by uni06D5.fina hamza.above; # sub uni06C2 by uni06D1.fina hamza.above; subtable = buildMultipleSubstSubtable({ "uni06C0": [ "uni06D5.fina", "hamza.above"], "uni06C2": [ "uni06D1.fina", "hamza.above"] }) Args: mapping: A dictionary mapping input glyph names to a list of output glyph names. Returns: An ``otTables.MultipleSubst`` object or ``None`` if the mapping dictionary is empty. """ if not mapping: return None self = ot.MultipleSubst() self.mapping = dict(mapping) return self def buildAlternateSubstSubtable(mapping): """Builds an alternate substitution (GSUB3) subtable. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.AlternateSubstBuilder` instead. Args: mapping: A dictionary mapping input glyph names to a list of output glyph names. Returns: An ``otTables.AlternateSubst`` object or ``None`` if the mapping dictionary is empty. """ if not mapping: return None self = ot.AlternateSubst() self.alternates = dict(mapping) return self def buildLigatureSubstSubtable(mapping): """Builds a ligature substitution (GSUB4) subtable. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.LigatureSubstBuilder` instead. Example:: # sub f f i by f_f_i; # sub f i by f_i; subtable = buildLigatureSubstSubtable({ ("f", "f", "i"): "f_f_i", ("f", "i"): "f_i", }) Args: mapping: A dictionary mapping tuples of glyph names to output glyph names. Returns: An ``otTables.LigatureSubst`` object or ``None`` if the mapping dictionary is empty. """ if not mapping: return None self = ot.LigatureSubst() # The following single line can replace the rest of this function # with fontTools >= 3.1: # self.ligatures = dict(mapping) self.ligatures = {} for components in sorted(mapping.keys(), key=self._getLigatureSortKey): ligature = ot.Ligature() ligature.Component = components[1:] ligature.CompCount = len(ligature.Component) + 1 ligature.LigGlyph = mapping[components] firstGlyph = components[0] self.ligatures.setdefault(firstGlyph, []).append(ligature) return self # GPOS def buildAnchor(x, y, point=None, deviceX=None, deviceY=None): """Builds an Anchor table. This determines the appropriate anchor format based on the passed parameters. Args: x (int): X coordinate. y (int): Y coordinate. point (int): Index of glyph contour point, if provided. deviceX (``otTables.Device``): X coordinate device table, if provided. deviceY (``otTables.Device``): Y coordinate device table, if provided.
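Example (illustrative; ``dev`` stands for a device table built with :func:`buildDevice`):: a1 = buildAnchor(500, 100) # Format 1: coordinates only a2 = buildAnchor(500, 100, point=3) # Format 2: adds a contour point index a3 = buildAnchor(500, 100, deviceX=dev, deviceY=dev) # Format 3: adds device tables Note that ``point`` cannot be combined with ``deviceX``/``deviceY``.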
Returns: An ``otTables.Anchor`` object. """ self = ot.Anchor() self.XCoordinate, self.YCoordinate = x, y self.Format = 1 if point is not None: self.AnchorPoint = point self.Format = 2 if deviceX is not None or deviceY is not None: assert ( self.Format == 1 ), "Either point, or both of deviceX/deviceY, must be None." self.XDeviceTable = deviceX self.YDeviceTable = deviceY self.Format = 3 return self def buildBaseArray(bases, numMarkClasses, glyphMap): """Builds a base array record. As part of building mark-to-base positioning rules, you will need to define a ``BaseArray`` record, which "defines for each base glyph an array of anchors, one for each mark class." This function builds the base array subtable. Example:: bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}} basearray = buildBaseArray(bases, 2, font.getReverseGlyphMap()) Args: bases (dict): A dictionary mapping anchors to glyphs; the keys being glyph names, and the values being dictionaries mapping mark class ID to the appropriate ``otTables.Anchor`` object used for attaching marks of that class. numMarkClasses (int): The total number of mark classes for which anchors are defined. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: An ``otTables.BaseArray`` object. """ self = ot.BaseArray() self.BaseRecord = [] for base in sorted(bases, key=glyphMap.__getitem__): b = bases[base] anchors = [b.get(markClass) for markClass in range(numMarkClasses)] self.BaseRecord.append(buildBaseRecord(anchors)) self.BaseCount = len(self.BaseRecord) return self def buildBaseRecord(anchors): # [otTables.Anchor, otTables.Anchor, ...] --> otTables.BaseRecord self = ot.BaseRecord() self.BaseAnchor = anchors return self def buildComponentRecord(anchors): """Builds a component record. As part of building mark-to-ligature positioning rules, you will need to define ``ComponentRecord`` objects, which contain "an array of offsets... to the Anchor tables that define all the attachment points used to attach marks to the component." This function builds the component record. Args: anchors: A list of ``otTables.Anchor`` objects or ``None``. Returns: A ``otTables.ComponentRecord`` object or ``None`` if no anchors are supplied. """ if not anchors: return None self = ot.ComponentRecord() self.LigatureAnchor = anchors return self def buildCursivePosSubtable(attach, glyphMap): """Builds a cursive positioning (GPOS3) subtable. Cursive positioning lookups are made up of a coverage table of glyphs, and a set of ``EntryExitRecord`` records containing the anchors for each glyph. This function builds the cursive positioning subtable. Example:: subtable = buildCursivePosSubtable({ "AlifIni": (None, buildAnchor(0, 50)), "BehMed": (buildAnchor(500,250), buildAnchor(0,50)), # ... }, font.getReverseGlyphMap()) Args: attach (dict): A mapping between glyph names and a tuple of two ``otTables.Anchor`` objects representing entry and exit anchors. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: An ``otTables.CursivePos`` object, or ``None`` if the attachment dictionary was empty. 
""" if not attach: return None self = ot.CursivePos() self.Format = 1 self.Coverage = buildCoverage(attach.keys(), glyphMap) self.EntryExitRecord = [] for glyph in self.Coverage.glyphs: entryAnchor, exitAnchor = attach[glyph] rec = ot.EntryExitRecord() rec.EntryAnchor = entryAnchor rec.ExitAnchor = exitAnchor self.EntryExitRecord.append(rec) self.EntryExitCount = len(self.EntryExitRecord) return self def buildDevice(deltas): """Builds a Device record as part of a ValueRecord or Anchor. Device tables specify size-specific adjustments to value records and anchors to reflect changes based on the resolution of the output. For example, one could specify that an anchor's Y position should be increased by 1 pixel when displayed at 8 pixels per em. This routine builds device records. Args: deltas: A dictionary mapping pixels-per-em sizes to the delta adjustment in pixels when the font is displayed at that size. Returns: An ``otTables.Device`` object if any deltas were supplied, or ``None`` otherwise. """ if not deltas: return None self = ot.Device() keys = deltas.keys() self.StartSize = startSize = min(keys) self.EndSize = endSize = max(keys) assert 0 <= startSize <= endSize self.DeltaValue = deltaValues = [ deltas.get(size, 0) for size in range(startSize, endSize + 1) ] maxDelta = max(deltaValues) minDelta = min(deltaValues) assert minDelta > -129 and maxDelta < 128 if minDelta > -3 and maxDelta < 2: self.DeltaFormat = 1 elif minDelta > -9 and maxDelta < 8: self.DeltaFormat = 2 else: self.DeltaFormat = 3 return self def buildLigatureArray(ligs, numMarkClasses, glyphMap): """Builds a LigatureArray subtable. As part of building a mark-to-ligature lookup, you will need to define the set of anchors (for each mark class) on each component of the ligature where marks can be attached. For example, for an Arabic divine name ligature (lam lam heh), you may want to specify mark attachment positioning for superior marks (fatha, etc.) and inferior marks (kasra, etc.) on each glyph of the ligature. This routine builds the ligature array record. Example:: buildLigatureArray({ "lam-lam-heh": [ { 0: superiorAnchor1, 1: inferiorAnchor1 }, # attach points for lam1 { 0: superiorAnchor2, 1: inferiorAnchor2 }, # attach points for lam2 { 0: superiorAnchor3, 1: inferiorAnchor3 }, # attach points for heh ] }, 2, font.getReverseGlyphMap()) Args: ligs (dict): A mapping of ligature names to an array of dictionaries: for each component glyph in the ligature, an dictionary mapping mark class IDs to anchors. numMarkClasses (int): The number of mark classes. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: An ``otTables.LigatureArray`` object if deltas were supplied. """ self = ot.LigatureArray() self.LigatureAttach = [] for lig in sorted(ligs, key=glyphMap.__getitem__): anchors = [] for component in ligs[lig]: anchors.append([component.get(mc) for mc in range(numMarkClasses)]) self.LigatureAttach.append(buildLigatureAttach(anchors)) self.LigatureCount = len(self.LigatureAttach) return self def buildLigatureAttach(components): # [[Anchor, Anchor], [Anchor, Anchor, Anchor]] --> LigatureAttach self = ot.LigatureAttach() self.ComponentRecord = [buildComponentRecord(c) for c in components] self.ComponentCount = len(self.ComponentRecord) return self def buildMarkArray(marks, glyphMap): """Builds a mark array subtable. As part of building mark-to-* positioning rules, you will need to define a MarkArray subtable, which "defines the class and the anchor point for a mark glyph." 
This function builds the mark array subtable. Example:: marks = { "acute": (0, buildAnchor(300,712)), # ... } markarray = buildMarkArray(marks, font.getReverseGlyphMap()) Args: marks (dict): A dictionary mapping anchors to glyphs; the keys being glyph names, and the values being a tuple of mark class number and an ``otTables.Anchor`` object representing the mark's attachment point. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: An ``otTables.MarkArray`` object. """ self = ot.MarkArray() self.MarkRecord = [] for mark in sorted(marks.keys(), key=glyphMap.__getitem__): markClass, anchor = marks[mark] markrec = buildMarkRecord(markClass, anchor) self.MarkRecord.append(markrec) self.MarkCount = len(self.MarkRecord) return self def buildMarkBasePos(marks, bases, glyphMap): """Build a list of MarkBasePos (GPOS4) subtables. This routine turns a set of marks and bases into a list of mark-to-base positioning subtables. Currently the list will contain a single subtable containing all marks and bases, although at a later date it may return the optimal list of subtables subsetting the marks and bases into groups which save space. See :func:`buildMarkBasePosSubtable` below. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.MarkBasePosBuilder` instead. Example:: # a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}} markbaseposes = buildMarkBasePos(marks, bases, font.getReverseGlyphMap()) Args: marks (dict): A dictionary mapping anchors to glyphs; the keys being glyph names, and the values being a tuple of mark class number and an ``otTables.Anchor`` object representing the mark's attachment point. (See :func:`buildMarkArray`.) bases (dict): A dictionary mapping anchors to glyphs; the keys being glyph names, and the values being dictionaries mapping mark class ID to the appropriate ``otTables.Anchor`` object used for attaching marks of that class. (See :func:`buildBaseArray`.) glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: A list of ``otTables.MarkBasePos`` objects. """ # TODO: Consider emitting multiple subtables to save space. # Partition the marks and bases into disjoint subsets, so that # MarkBasePos rules would only access glyphs from a single # subset. This would likely lead to smaller mark/base # matrices, so we might be able to omit many of the empty # anchor tables that we currently produce. Of course, this # would only work if the MarkBasePos rules of real-world fonts # allow partitioning into multiple subsets. We should find out # whether this is the case; if so, implement the optimization. # On the other hand, a very large number of subtables could # slow down layout engines; so this would need profiling. return [buildMarkBasePosSubtable(marks, bases, glyphMap)] def buildMarkBasePosSubtable(marks, bases, glyphMap): """Build a single MarkBasePos (GPOS4) subtable. This builds a mark-to-base lookup subtable containing all of the referenced marks and bases. See :func:`buildMarkBasePos`. Args: marks (dict): A dictionary mapping anchors to glyphs; the keys being glyph names, and the values being a tuple of mark class number and an ``otTables.Anchor`` object representing the mark's attachment point. (See :func:`buildMarkArray`.)
bases (dict): A dictionary mapping anchors to glyphs; the keys being glyph names, and the values being dictionaries mapping mark class ID to the appropriate ``otTables.Anchor`` object used for attaching marks of that class. (See :func:`buildBaseArray`.) glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: A ``otTables.MarkBasePos`` object. """ self = ot.MarkBasePos() self.Format = 1 self.MarkCoverage = buildCoverage(marks, glyphMap) self.MarkArray = buildMarkArray(marks, glyphMap) self.ClassCount = max([mc for mc, _ in marks.values()]) + 1 self.BaseCoverage = buildCoverage(bases, glyphMap) self.BaseArray = buildBaseArray(bases, self.ClassCount, glyphMap) return self def buildMarkLigPos(marks, ligs, glyphMap): """Build a list of MarkLigPos (GPOS5) subtables. This routine turns a set of marks and ligatures into a list of mark-to-ligature positioning subtables. Currently the list will contain a single subtable containing all marks and ligatures, although at a later date it may return the optimal list of subtables subsetting the marks and ligatures into groups which save space. See :func:`buildMarkLigPosSubtable` below. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.MarkLigPosBuilder` instead. Example:: # a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... marks = { "acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2) } ligs = { "f_i": [ { 0: a3, 1: a5 }, # f { 0: a4, 1: a5 } # i ], # "c_t": [{...}, {...}] } markligposes = buildMarkLigPos(marks, ligs, font.getReverseGlyphMap()) Args: marks (dict): A dictionary mapping anchors to glyphs; the keys being glyph names, and the values being a tuple of mark class number and an ``otTables.Anchor`` object representing the mark's attachment point. (See :func:`buildMarkArray`.) ligs (dict): A mapping of ligature names to an array of dictionaries: for each component glyph in the ligature, a dictionary mapping mark class IDs to anchors. (See :func:`buildLigatureArray`.) glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: A list of ``otTables.MarkLigPos`` objects. """ # TODO: Consider splitting into multiple subtables to save space, # as with MarkBasePos, this would be a trade-off that would need # profiling. And, depending on how typical fonts are structured, # it might not be worth doing at all. return [buildMarkLigPosSubtable(marks, ligs, glyphMap)] def buildMarkLigPosSubtable(marks, ligs, glyphMap): """Build a single MarkLigPos (GPOS5) subtable. This builds a mark-to-ligature lookup subtable containing all of the referenced marks and ligatures. See :func:`buildMarkLigPos`. Args: marks (dict): A dictionary mapping anchors to glyphs; the keys being glyph names, and the values being a tuple of mark class number and an ``otTables.Anchor`` object representing the mark's attachment point. (See :func:`buildMarkArray`.) ligs (dict): A mapping of ligature names to an array of dictionaries: for each component glyph in the ligature, a dictionary mapping mark class IDs to anchors. (See :func:`buildLigatureArray`.) glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: A ``otTables.MarkLigPos`` object.
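Example (illustrative; reuses the ``marks`` and ``ligs`` structures from the :func:`buildMarkLigPos` example above):: subtable = buildMarkLigPosSubtable(marks, ligs, font.getReverseGlyphMap())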
""" self = ot.MarkLigPos() self.Format = 1 self.MarkCoverage = buildCoverage(marks, glyphMap) self.MarkArray = buildMarkArray(marks, glyphMap) self.ClassCount = max([mc for mc, _ in marks.values()]) + 1 self.LigatureCoverage = buildCoverage(ligs, glyphMap) self.LigatureArray = buildLigatureArray(ligs, self.ClassCount, glyphMap) return self def buildMarkRecord(classID, anchor): assert isinstance(classID, int) assert isinstance(anchor, ot.Anchor) self = ot.MarkRecord() self.Class = classID self.MarkAnchor = anchor return self def buildMark2Record(anchors): # [otTables.Anchor, otTables.Anchor, ...] --> otTables.Mark2Record self = ot.Mark2Record() self.Mark2Anchor = anchors return self def _getValueFormat(f, values, i): # Helper for buildPairPos{Glyphs|Classes}Subtable. if f is not None: return f mask = 0 for value in values: if value is not None and value[i] is not None: mask |= value[i].getFormat() return mask def buildPairPosClassesSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=None): """Builds a class pair adjustment (GPOS2 format 2) subtable. Kerning tables are generally expressed as pair positioning tables using class-based pair adjustments. This routine builds format 2 PairPos subtables. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.lookupBuilders.ClassPairPosSubtableBuilder` instead, as this takes care of ensuring that the supplied pairs can be formed into non-overlapping classes and emitting individual subtables whenever the non-overlapping requirement means that a new subtable is required. Example:: pairs = {} pairs[( [ "K", "X" ], [ "W", "V" ] )] = ( buildValue(xAdvance=+5), buildValue() ) # pairs[(... , ...)] = (..., ...) pairpos = buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap()) Args: pairs (dict): Pair positioning data; the keys being a two-element tuple of lists of glyphnames, and the values being a two-element tuple of ``otTables.ValueRecord`` objects. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. valueFormat1: Force the "left" value records to the given format. valueFormat2: Force the "right" value records to the given format. Returns: A ``otTables.PairPos`` object. """ coverage = set() classDef1 = ClassDefBuilder(useClass0=True) classDef2 = ClassDefBuilder(useClass0=False) for gc1, gc2 in sorted(pairs): coverage.update(gc1) classDef1.add(gc1) classDef2.add(gc2) self = ot.PairPos() self.Format = 2 valueFormat1 = self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0) valueFormat2 = self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1) self.Coverage = buildCoverage(coverage, glyphMap) self.ClassDef1 = classDef1.build() self.ClassDef2 = classDef2.build() classes1 = classDef1.classes() classes2 = classDef2.classes() self.Class1Record = [] for c1 in classes1: rec1 = ot.Class1Record() rec1.Class2Record = [] self.Class1Record.append(rec1) for c2 in classes2: rec2 = ot.Class2Record() val1, val2 = pairs.get((c1, c2), (None, None)) rec2.Value1 = ( ValueRecord(src=val1, valueFormat=valueFormat1) if valueFormat1 else None ) rec2.Value2 = ( ValueRecord(src=val2, valueFormat=valueFormat2) if valueFormat2 else None ) rec1.Class2Record.append(rec2) self.Class1Count = len(self.Class1Record) self.Class2Count = len(classes2) return self def buildPairPosGlyphs(pairs, glyphMap): """Builds a list of glyph-based pair adjustment (GPOS2 format 1) subtables. 
This organises a list of pair positioning adjustments into subtables based on common value record formats. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.PairPosBuilder` instead. Example:: pairs = { ("K", "W"): ( buildValue({"xAdvance": +5}), buildValue({}) ), ("K", "V"): ( buildValue({"xAdvance": +5}), buildValue({}) ), # ... } subtables = buildPairPosGlyphs(pairs, font.getReverseGlyphMap()) Args: pairs (dict): Pair positioning data; the keys being a two-element tuple of glyphnames, and the values being a two-element tuple of ``otTables.ValueRecord`` objects. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: A list of ``otTables.PairPos`` objects. """ p = {} # (formatA, formatB) --> {(glyphA, glyphB): (valA, valB)} for (glyphA, glyphB), (valA, valB) in pairs.items(): formatA = valA.getFormat() if valA is not None else 0 formatB = valB.getFormat() if valB is not None else 0 pos = p.setdefault((formatA, formatB), {}) pos[(glyphA, glyphB)] = (valA, valB) return [ buildPairPosGlyphsSubtable(pos, glyphMap, formatA, formatB) for ((formatA, formatB), pos) in sorted(p.items()) ] def buildPairPosGlyphsSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=None): """Builds a single glyph-based pair adjustment (GPOS2 format 1) subtable. This builds a PairPos subtable from a dictionary of glyph pairs and their positioning adjustments. See also :func:`buildPairPosGlyphs`. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.PairPosBuilder` instead. Example:: pairs = { ("K", "W"): ( buildValue({"xAdvance": +5}), buildValue({}) ), ("K", "V"): ( buildValue({"xAdvance": +5}), buildValue({}) ), # ... } pairpos = buildPairPosGlyphsSubtable(pairs, font.getReverseGlyphMap()) Args: pairs (dict): Pair positioning data; the keys being a two-element tuple of glyphnames, and the values being a two-element tuple of ``otTables.ValueRecord`` objects. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. valueFormat1: Force the "left" value records to the given format. valueFormat2: Force the "right" value records to the given format. Returns: A ``otTables.PairPos`` object. """ self = ot.PairPos() self.Format = 1 valueFormat1 = self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0) valueFormat2 = self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1) p = {} for (glyphA, glyphB), (valA, valB) in pairs.items(): p.setdefault(glyphA, []).append((glyphB, valA, valB)) self.Coverage = buildCoverage({g for g, _ in pairs.keys()}, glyphMap) self.PairSet = [] for glyph in self.Coverage.glyphs: ps = ot.PairSet() ps.PairValueRecord = [] self.PairSet.append(ps) for glyph2, val1, val2 in sorted(p[glyph], key=lambda x: glyphMap[x[0]]): pvr = ot.PairValueRecord() pvr.SecondGlyph = glyph2 pvr.Value1 = ( ValueRecord(src=val1, valueFormat=valueFormat1) if valueFormat1 else None ) pvr.Value2 = ( ValueRecord(src=val2, valueFormat=valueFormat2) if valueFormat2 else None ) ps.PairValueRecord.append(pvr) ps.PairValueCount = len(ps.PairValueRecord) self.PairSetCount = len(self.PairSet) return self def buildSinglePos(mapping, glyphMap): """Builds a list of single adjustment (GPOS1) subtables. This builds a list of SinglePos subtables from a dictionary of glyph names and their positioning adjustments. The format of the subtables is determined to optimize the size of the resulting subtables.
See also :func:`buildSinglePosSubtable`. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.SinglePosBuilder` instead. Example:: mapping = { "V": buildValue({"xAdvance": +5}), # ... } subtables = buildSinglePos(mapping, font.getReverseGlyphMap()) Args: mapping (dict): A mapping between glyphnames and ``otTables.ValueRecord`` objects. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: A list of ``otTables.SinglePos`` objects. """ result, handled = [], set() # In SinglePos format 1, the covered glyphs all share the same ValueRecord. # In format 2, each glyph has its own ValueRecord, but these records # all have the same properties (eg., all have an X but no Y placement). coverages, masks, values = {}, {}, {} for glyph, value in mapping.items(): key = _getSinglePosValueKey(value) coverages.setdefault(key, []).append(glyph) masks.setdefault(key[0], []).append(key) values[key] = value # If a ValueRecord is shared between multiple glyphs, we generate # a SinglePos format 1 subtable; that is the most compact form. for key, glyphs in coverages.items(): # 5 ushorts is the length of introducing another sublookup if len(glyphs) * _getSinglePosValueSize(key) > 5: format1Mapping = {g: values[key] for g in glyphs} result.append(buildSinglePosSubtable(format1Mapping, glyphMap)) handled.add(key) # In the remaining ValueRecords, look for those whose valueFormat # (the set of used properties) is shared between multiple records. # These will get encoded in format 2. for valueFormat, keys in masks.items(): f2 = [k for k in keys if k not in handled] if len(f2) > 1: format2Mapping = {} for k in f2: format2Mapping.update((g, values[k]) for g in coverages[k]) result.append(buildSinglePosSubtable(format2Mapping, glyphMap)) handled.update(f2) # The remaining ValueRecords are only used by a few glyphs, normally # one. We encode these in format 1 again. for key, glyphs in coverages.items(): if key not in handled: for g in glyphs: st = buildSinglePosSubtable({g: values[key]}, glyphMap) result.append(st) # When the OpenType layout engine traverses the subtables, it will # stop after the first matching subtable. Therefore, we sort the # resulting subtables by decreasing coverage size; this increases # the chance that the layout engine can do an early exit. (Of course, # this would only be true if all glyphs were equally frequent, which # is not really the case; but we do not know their distribution). # If two subtables cover the same number of glyphs, we sort them # by glyph ID so that our output is deterministic. result.sort(key=lambda t: _getSinglePosTableKey(t, glyphMap)) return result def buildSinglePosSubtable(values, glyphMap): """Builds a single adjustment (GPOS1) subtable. This builds a single SinglePos subtable from a dictionary of glyph names and their positioning adjustments. The format of the subtable is determined to optimize the size of the output. See also :func:`buildSinglePos`. Note that if you are implementing a layout compiler, you may find it more flexible to use :py:class:`fontTools.otlLib.builder.SinglePosBuilder` instead. Example:: values = { "V": buildValue({"xAdvance": +5}), # ... } subtable = buildSinglePosSubtable(values, font.getReverseGlyphMap()) Args: values (dict): A mapping between glyphnames and ``otTables.ValueRecord`` objects. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``.
Returns: A ``otTables.SinglePos`` object. """ self = ot.SinglePos() self.Coverage = buildCoverage(values.keys(), glyphMap) valueFormat = self.ValueFormat = reduce( int.__or__, [v.getFormat() for v in values.values()], 0 ) valueRecords = [ ValueRecord(src=values[g], valueFormat=valueFormat) for g in self.Coverage.glyphs ] if all(v == valueRecords[0] for v in valueRecords): self.Format = 1 if self.ValueFormat != 0: self.Value = valueRecords[0] else: self.Value = None else: self.Format = 2 self.Value = valueRecords self.ValueCount = len(self.Value) return self def _getSinglePosTableKey(subtable, glyphMap): assert isinstance(subtable, ot.SinglePos), subtable glyphs = subtable.Coverage.glyphs return (-len(glyphs), glyphMap[glyphs[0]]) def _getSinglePosValueKey(valueRecord): # otBase.ValueRecord --> (2, ("YPlacement", 12)) assert isinstance(valueRecord, ValueRecord), valueRecord valueFormat, result = 0, [] for name, value in valueRecord.__dict__.items(): if isinstance(value, ot.Device): result.append((name, _makeDeviceTuple(value))) else: result.append((name, value)) valueFormat |= valueRecordFormatDict[name][0] result.sort() result.insert(0, valueFormat) return tuple(result) _DeviceTuple = namedtuple("_DeviceTuple", "DeltaFormat StartSize EndSize DeltaValue") def _makeDeviceTuple(device): # otTables.Device --> tuple, for making device tables unique return _DeviceTuple( device.DeltaFormat, device.StartSize, device.EndSize, () if device.DeltaFormat & 0x8000 else tuple(device.DeltaValue), ) def _getSinglePosValueSize(valueKey): # Returns how many ushorts this valueKey (short form of ValueRecord) takes up count = 0 for _, v in valueKey[1:]: if isinstance(v, _DeviceTuple): count += len(v.DeltaValue) + 3 else: count += 1 return count def buildValue(value): """Builds a positioning value record. Value records are used to specify coordinates and adjustments for positioning and attaching glyphs. Many of the positioning functions in this library take ``otTables.ValueRecord`` objects as arguments. This function builds value records from dictionaries. Args: value (dict): A dictionary with zero or more of the following keys: - ``xPlacement`` - ``yPlacement`` - ``xAdvance`` - ``yAdvance`` - ``xPlaDevice`` - ``yPlaDevice`` - ``xAdvDevice`` - ``yAdvDevice`` Returns: An ``otTables.ValueRecord`` object. """ self = ValueRecord() for k, v in value.items(): setattr(self, k, v) return self # GDEF def buildAttachList(attachPoints, glyphMap): """Builds an AttachList subtable. A GDEF table may contain an Attachment Point List table (AttachList) which stores the contour indices of attachment points for glyphs with attachment points. This routine builds AttachList subtables. Args: attachPoints (dict): A mapping between glyph names and a list of contour indices. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: An ``otTables.AttachList`` object if attachment points are supplied, or ``None`` otherwise. """ if not attachPoints: return None self = ot.AttachList() self.Coverage = buildCoverage(attachPoints.keys(), glyphMap) self.AttachPoint = [buildAttachPoint(attachPoints[g]) for g in self.Coverage.glyphs] self.GlyphCount = len(self.AttachPoint) return self def buildAttachPoint(points): # [4, 23, 41] --> otTables.AttachPoint # Only used by above.
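# For example (illustrative): buildAttachPoint([23, 4, 41, 4]) returns a record with # PointIndex == [4, 23, 41] and PointCount == 3, since the indices are de-duplicated # and sorted below.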
if not points: return None self = ot.AttachPoint() self.PointIndex = sorted(set(points)) self.PointCount = len(self.PointIndex) return self def buildCaretValueForCoord(coord): # 500 --> otTables.CaretValue, format 1 # (500, DeviceTable) --> otTables.CaretValue, format 3 self = ot.CaretValue() if isinstance(coord, tuple): self.Format = 3 self.Coordinate, self.DeviceTable = coord else: self.Format = 1 self.Coordinate = coord return self def buildCaretValueForPoint(point): # 4 --> otTables.CaretValue, format 2 self = ot.CaretValue() self.Format = 2 self.CaretValuePoint = point return self def buildLigCaretList(coords, points, glyphMap): """Builds a ligature caret list table. Ligatures appear as a single glyph representing multiple characters; however when, for example, editing text containing a ``f_i`` ligature, the user may want to place the cursor between the ``f`` and the ``i``. The ligature caret list in the GDEF table specifies the position to display the "caret" (the character insertion indicator, typically a flashing vertical bar) "inside" the ligature to represent an insertion point. The insertion positions may be specified either by coordinate or by contour point. Example:: coords = { "f_f_i": [300, 600] # f|fi cursor at 300 units, ff|i cursor at 600. } points = { "c_t": [28] # c|t cursor appears at coordinate of contour point 28. } ligcaretlist = buildLigCaretList(coords, points, font.getReverseGlyphMap()) Args: coords: A mapping between glyph names and a list of coordinates for the insertion point of each ligature component after the first one. points: A mapping between glyph names and a list of contour points for the insertion point of each ligature component after the first one. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: A ``otTables.LigCaretList`` object if any carets are present, or ``None`` otherwise.""" glyphs = set(coords.keys()) if coords else set() if points: glyphs.update(points.keys()) carets = {g: buildLigGlyph(coords.get(g), points.get(g)) for g in glyphs} carets = {g: c for g, c in carets.items() if c is not None} if not carets: return None self = ot.LigCaretList() self.Coverage = buildCoverage(carets.keys(), glyphMap) self.LigGlyph = [carets[g] for g in self.Coverage.glyphs] self.LigGlyphCount = len(self.LigGlyph) return self def buildLigGlyph(coords, points): # ([500], [4]) --> otTables.LigGlyph; None for empty coords/points carets = [] if coords: coords = sorted(coords, key=lambda c: c[0] if isinstance(c, tuple) else c) carets.extend([buildCaretValueForCoord(c) for c in coords]) if points: carets.extend([buildCaretValueForPoint(p) for p in sorted(points)]) if not carets: return None self = ot.LigGlyph() self.CaretValue = carets self.CaretCount = len(self.CaretValue) return self def buildMarkGlyphSetsDef(markSets, glyphMap): """Builds a mark glyph sets definition table. OpenType Layout lookups may choose to use mark filtering sets to consider or ignore particular combinations of marks. These sets are specified by setting a flag on the lookup, but the mark filtering sets are defined in the ``GDEF`` table. This routine builds the subtable containing the mark glyph set definitions. Example:: set0 = {"acute", "grave"} set1 = {"caron", "grave"} markglyphsets = buildMarkGlyphSetsDef([set0, set1], font.getReverseGlyphMap()) Args: markSets: A list of sets of glyphnames. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: An ``otTables.MarkGlyphSetsDef`` object.
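The position of each set in ``markSets`` is the mark filtering set index that lookups refer to; e.g. (illustrative) a lookup built with ``markFilterSet = 1`` would filter using ``set1`` from the example above.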
""" if not markSets: return None self = ot.MarkGlyphSetsDef() self.MarkSetTableFormat = 1 self.Coverage = [buildCoverage(m, glyphMap) for m in markSets] self.MarkSetCount = len(self.Coverage) return self class ClassDefBuilder(object): """Helper for building ClassDef tables.""" def __init__(self, useClass0): self.classes_ = set() self.glyphs_ = {} self.useClass0_ = useClass0 def canAdd(self, glyphs): if isinstance(glyphs, (set, frozenset)): glyphs = sorted(glyphs) glyphs = tuple(glyphs) if glyphs in self.classes_: return True for glyph in glyphs: if glyph in self.glyphs_: return False return True def add(self, glyphs): if isinstance(glyphs, (set, frozenset)): glyphs = sorted(glyphs) glyphs = tuple(glyphs) if glyphs in self.classes_: return self.classes_.add(glyphs) for glyph in glyphs: if glyph in self.glyphs_: raise OpenTypeLibError( f"Glyph {glyph} is already present in class.", None ) self.glyphs_[glyph] = glyphs def classes(self): # In ClassDef1 tables, class id #0 does not need to be encoded # because zero is the default. Therefore, we use id #0 for the # glyph class that has the largest number of members. However, # in other tables than ClassDef1, 0 means "every other glyph" # so we should not use that ID for any real glyph classes; # we implement this by inserting an empty set at position 0. # # TODO: Instead of counting the number of glyphs in each class, # we should determine the encoded size. If the glyphs in a large # class form a contiguous range, the encoding is actually quite # compact, whereas a non-contiguous set might need a lot of bytes # in the output file. We don't get this right with the key below. result = sorted(self.classes_, key=lambda s: (-len(s), s)) if not self.useClass0_: result.insert(0, frozenset()) return result def build(self): glyphClasses = {} for classID, glyphs in enumerate(self.classes()): if classID == 0: continue for glyph in glyphs: glyphClasses[glyph] = classID classDef = ot.ClassDef() classDef.classDefs = glyphClasses return classDef AXIS_VALUE_NEGATIVE_INFINITY = fixedToFloat(-0x80000000, 16) AXIS_VALUE_POSITIVE_INFINITY = fixedToFloat(0x7FFFFFFF, 16) def buildStatTable( ttFont, axes, locations=None, elidedFallbackName=2, windowsNames=True, macNames=True ): """Add a 'STAT' table to 'ttFont'. 'axes' is a list of dictionaries describing axes and their values. Example:: axes = [ dict( tag="wght", name="Weight", ordering=0, # optional values=[ dict(value=100, name='Thin'), dict(value=300, name='Light'), dict(value=400, name='Regular', flags=0x2), dict(value=900, name='Black'), ], ) ] Each axis dict must have 'tag' and 'name' items. 'tag' maps to the 'AxisTag' field. 'name' can be a name ID (int), a string, or a dictionary containing multilingual names (see the addMultilingualName() name table method), and will translate to the AxisNameID field. An axis dict may contain an 'ordering' item that maps to the AxisOrdering field. If omitted, the order of the axes list is used to calculate AxisOrdering fields. The axis dict may contain a 'values' item, which is a list of dictionaries describing AxisValue records belonging to this axis. Each value dict must have a 'name' item, which can be a name ID (int), a string, or a dictionary containing multilingual names, like the axis name. It translates to the ValueNameID field. Optionally the value dict can contain a 'flags' item. It maps to the AxisValue Flags field, and will be 0 when omitted. 
The format of the AxisValue is determined by the remaining contents of the value dictionary: If the value dict contains a 'value' item, an AxisValue record Format 1 is created. If in addition to the 'value' item it contains a 'linkedValue' item, an AxisValue record Format 3 is built. If the value dict contains a 'nominalValue' item, an AxisValue record Format 2 is built. Optionally it may contain 'rangeMinValue' and 'rangeMaxValue' items. These map to -Infinity and +Infinity respectively if omitted. You cannot specify Format 4 AxisValue tables this way, as they are not tied to a single axis, and specify a name for a location that is defined by multiple axes values. Instead, you need to supply the 'locations' argument. The optional 'locations' argument specifies AxisValue Format 4 tables. It should be a list of dicts, where each dict has a 'name' item, which works just like the value dicts above, an optional 'flags' item (defaulting to 0x0), and a 'location' dict. A location dict key is an axis tag, and the associated value is the location on the specified axis. They map to the AxisIndex and Value fields of the AxisValueRecord. Example:: locations = [ dict(name='Regular ABCD', location=dict(wght=300, ABCD=100)), dict(name='Bold ABCD XYZ', location=dict(wght=600, ABCD=200)), ] The optional 'elidedFallbackName' argument can be a name ID (int), a string, a dictionary containing multilingual names, or a list of STATNameStatements. It translates to the ElidedFallbackNameID field. The 'ttFont' argument must be a TTFont instance that already has a 'name' table. If a 'STAT' table already exists, it will be overwritten by the newly created one. """ ttFont["STAT"] = ttLib.newTable("STAT") statTable = ttFont["STAT"].table = ot.STAT() statTable.ElidedFallbackNameID = _addName( ttFont, elidedFallbackName, windows=windowsNames, mac=macNames ) # 'locations' contains data for AxisValue Format 4 axisRecords, axisValues = _buildAxisRecords( axes, ttFont, windowsNames=windowsNames, macNames=macNames ) if not locations: statTable.Version = 0x00010001 else: # We'll be adding Format 4 AxisValue records, which # requires a higher table version statTable.Version = 0x00010002 multiAxisValues = _buildAxisValuesFormat4( locations, axes, ttFont, windowsNames=windowsNames, macNames=macNames ) axisValues = multiAxisValues + axisValues ttFont["name"].names.sort() # Store AxisRecords axisRecordArray = ot.AxisRecordArray() axisRecordArray.Axis = axisRecords # XXX these should not be hard-coded but computed automatically statTable.DesignAxisRecordSize = 8 statTable.DesignAxisRecord = axisRecordArray statTable.DesignAxisCount = len(axisRecords) statTable.AxisValueCount = 0 statTable.AxisValueArray = None if axisValues: # Store AxisValueRecords axisValueArray = ot.AxisValueArray() axisValueArray.AxisValue = axisValues statTable.AxisValueArray = axisValueArray statTable.AxisValueCount = len(axisValues) def _buildAxisRecords(axes, ttFont, windowsNames=True, macNames=True): axisRecords = [] axisValues = [] for axisRecordIndex, axisDict in enumerate(axes): axis = ot.AxisRecord() axis.AxisTag = axisDict["tag"] axis.AxisNameID = _addName( ttFont, axisDict["name"], 256, windows=windowsNames, mac=macNames ) axis.AxisOrdering = axisDict.get("ordering", axisRecordIndex) axisRecords.append(axis) for axisVal in axisDict.get("values", ()): axisValRec = ot.AxisValue() axisValRec.AxisIndex = axisRecordIndex axisValRec.Flags = axisVal.get("flags", 0) axisValRec.ValueNameID = _addName( ttFont, axisVal["name"], windows=windowsNames, 
mac=macNames ) if "value" in axisVal: axisValRec.Value = axisVal["value"] if "linkedValue" in axisVal: axisValRec.Format = 3 axisValRec.LinkedValue = axisVal["linkedValue"] else: axisValRec.Format = 1 elif "nominalValue" in axisVal: axisValRec.Format = 2 axisValRec.NominalValue = axisVal["nominalValue"] axisValRec.RangeMinValue = axisVal.get( "rangeMinValue", AXIS_VALUE_NEGATIVE_INFINITY ) axisValRec.RangeMaxValue = axisVal.get( "rangeMaxValue", AXIS_VALUE_POSITIVE_INFINITY ) else: raise ValueError("Can't determine format for AxisValue") axisValues.append(axisValRec) return axisRecords, axisValues def _buildAxisValuesFormat4(locations, axes, ttFont, windowsNames=True, macNames=True): axisTagToIndex = {} for axisRecordIndex, axisDict in enumerate(axes): axisTagToIndex[axisDict["tag"]] = axisRecordIndex axisValues = [] for axisLocationDict in locations: axisValRec = ot.AxisValue() axisValRec.Format = 4 axisValRec.ValueNameID = _addName( ttFont, axisLocationDict["name"], windows=windowsNames, mac=macNames ) axisValRec.Flags = axisLocationDict.get("flags", 0) axisValueRecords = [] for tag, value in axisLocationDict["location"].items(): avr = ot.AxisValueRecord() avr.AxisIndex = axisTagToIndex[tag] avr.Value = value axisValueRecords.append(avr) axisValueRecords.sort(key=lambda avr: avr.AxisIndex) axisValRec.AxisCount = len(axisValueRecords) axisValRec.AxisValueRecord = axisValueRecords axisValues.append(axisValRec) return axisValues def _addName(ttFont, value, minNameID=0, windows=True, mac=True): nameTable = ttFont["name"] if isinstance(value, int): # Already a nameID return value if isinstance(value, str): names = dict(en=value) elif isinstance(value, dict): names = value elif isinstance(value, list): nameID = nameTable._findUnusedNameID() for nameRecord in value: if isinstance(nameRecord, STATNameStatement): nameTable.setName( nameRecord.string, nameID, nameRecord.platformID, nameRecord.platEncID, nameRecord.langID, ) else: raise TypeError("value must be a list of STATNameStatements") return nameID else: raise TypeError("value must be int, str, dict or list") return nameTable.addMultilingualName( names, ttFont=ttFont, windows=windows, mac=mac, minNameID=minNameID ) def buildMathTable( ttFont, constants=None, italicsCorrections=None, topAccentAttachments=None, extendedShapes=None, mathKerns=None, minConnectorOverlap=0, vertGlyphVariants=None, horizGlyphVariants=None, vertGlyphAssembly=None, horizGlyphAssembly=None, ): """ Add a 'MATH' table to 'ttFont'. 'constants' is a dictionary of math constants. The keys are the constant names from the MATH table specification (with capital first letter), and the values are the constant values as numbers. 'italicsCorrections' is a dictionary of italic corrections. The keys are the glyph names, and the values are the italic corrections as numbers. 'topAccentAttachments' is a dictionary of top accent attachments. The keys are the glyph names, and the values are the top accent horizontal positions as numbers. 'extendedShapes' is a set of extended shape glyphs. 'mathKerns' is a dictionary of math kerns. The keys are the glyph names, and the values are dictionaries. The keys of these dictionaries are the side names ('TopRight', 'TopLeft', 'BottomRight', 'BottomLeft'), and the values are tuples of two lists. The first list contains the correction heights as numbers, and the second list contains the kern values as numbers. 'minConnectorOverlap' is the minimum connector overlap as a number. 'vertGlyphVariants' is a dictionary of vertical glyph variants. 
The keys are the glyph names, and the values are lists of tuples of glyph name and full advance height. 'horizGlyphVariants' is a dictionary of horizontal glyph variants. The keys are the glyph names, and the values are lists of tuples of glyph name and full advance width. 'vertGlyphAssembly' is a dictionary of vertical glyph assemblies. The keys are the glyph names, and the values are tuples of assembly parts and italics correction. The assembly parts are tuples of glyph name, flags, start connector length, end connector length, and full advance height. 'horizGlyphAssembly' is a dictionary of horizontal glyph assemblies. The keys are the glyph names, and the values are tuples of assembly parts and italics correction. The assembly parts are tuples of glyph name, flags, start connector length, end connector length, and full advance width. Where a number is expected, an integer or a float can be used. The floats will be rounded. Example:: constants = { "ScriptPercentScaleDown": 70, "ScriptScriptPercentScaleDown": 50, "DelimitedSubFormulaMinHeight": 24, "DisplayOperatorMinHeight": 60, ... } italicsCorrections = { "fitalic-math": 100, "fbolditalic-math": 120, ... } topAccentAttachments = { "circumflexcomb": 500, "acutecomb": 400, "A": 300, "B": 340, ... } extendedShapes = {"parenleft", "parenright", ...} mathKerns = { "A": { "TopRight": ([-50, -100], [10, 20, 30]), "TopLeft": ([50, 100], [10, 20, 30]), ... }, ... } vertGlyphVariants = { "parenleft": [("parenleft", 700), ("parenleft.size1", 1000), ...], "parenright": [("parenright", 700), ("parenright.size1", 1000), ...], ... } vertGlyphAssembly = { "braceleft": [ ( ("braceleft.bottom", 0, 0, 200, 500), ("braceleft.extender", 1, 200, 200, 200), ("braceleft.middle", 0, 100, 100, 700), ("braceleft.extender", 1, 200, 200, 200), ("braceleft.top", 0, 200, 0, 500), ), 100, ], ...
} """ glyphMap = ttFont.getReverseGlyphMap() ttFont["MATH"] = math = ttLib.newTable("MATH") math.table = table = ot.MATH() table.Version = 0x00010000 table.populateDefaults() table.MathConstants = _buildMathConstants(constants) table.MathGlyphInfo = _buildMathGlyphInfo( glyphMap, italicsCorrections, topAccentAttachments, extendedShapes, mathKerns, ) table.MathVariants = _buildMathVariants( glyphMap, minConnectorOverlap, vertGlyphVariants, horizGlyphVariants, vertGlyphAssembly, horizGlyphAssembly, ) def _buildMathConstants(constants): if not constants: return None mathConstants = ot.MathConstants() for conv in mathConstants.getConverters(): value = otRound(constants.get(conv.name, 0)) if conv.tableClass: assert issubclass(conv.tableClass, ot.MathValueRecord) value = _mathValueRecord(value) setattr(mathConstants, conv.name, value) return mathConstants def _buildMathGlyphInfo( glyphMap, italicsCorrections, topAccentAttachments, extendedShapes, mathKerns, ): if not any([extendedShapes, italicsCorrections, topAccentAttachments, mathKerns]): return None info = ot.MathGlyphInfo() info.populateDefaults() if italicsCorrections: coverage = buildCoverage(italicsCorrections.keys(), glyphMap) info.MathItalicsCorrectionInfo = ot.MathItalicsCorrectionInfo() info.MathItalicsCorrectionInfo.Coverage = coverage info.MathItalicsCorrectionInfo.ItalicsCorrectionCount = len(coverage.glyphs) info.MathItalicsCorrectionInfo.ItalicsCorrection = [ _mathValueRecord(italicsCorrections[n]) for n in coverage.glyphs ] if topAccentAttachments: coverage = buildCoverage(topAccentAttachments.keys(), glyphMap) info.MathTopAccentAttachment = ot.MathTopAccentAttachment() info.MathTopAccentAttachment.TopAccentCoverage = coverage info.MathTopAccentAttachment.TopAccentAttachmentCount = len(coverage.glyphs) info.MathTopAccentAttachment.TopAccentAttachment = [ _mathValueRecord(topAccentAttachments[n]) for n in coverage.glyphs ] if extendedShapes: info.ExtendedShapeCoverage = buildCoverage(extendedShapes, glyphMap) if mathKerns: coverage = buildCoverage(mathKerns.keys(), glyphMap) info.MathKernInfo = ot.MathKernInfo() info.MathKernInfo.MathKernCoverage = coverage info.MathKernInfo.MathKernCount = len(coverage.glyphs) info.MathKernInfo.MathKernInfoRecords = [] for glyph in coverage.glyphs: record = ot.MathKernInfoRecord() for side in {"TopRight", "TopLeft", "BottomRight", "BottomLeft"}: if side in mathKerns[glyph]: correctionHeights, kernValues = mathKerns[glyph][side] assert len(correctionHeights) == len(kernValues) - 1 kern = ot.MathKern() kern.HeightCount = len(correctionHeights) kern.CorrectionHeight = [ _mathValueRecord(h) for h in correctionHeights ] kern.KernValue = [_mathValueRecord(v) for v in kernValues] setattr(record, f"{side}MathKern", kern) info.MathKernInfo.MathKernInfoRecords.append(record) return info def _buildMathVariants( glyphMap, minConnectorOverlap, vertGlyphVariants, horizGlyphVariants, vertGlyphAssembly, horizGlyphAssembly, ): if not any( [vertGlyphVariants, horizGlyphVariants, vertGlyphAssembly, horizGlyphAssembly] ): return None variants = ot.MathVariants() variants.populateDefaults() variants.MinConnectorOverlap = minConnectorOverlap if vertGlyphVariants or vertGlyphAssembly: variants.VertGlyphCoverage, variants.VertGlyphConstruction = ( _buildMathGlyphConstruction( glyphMap, vertGlyphVariants, vertGlyphAssembly, ) ) if horizGlyphVariants or horizGlyphAssembly: variants.HorizGlyphCoverage, variants.HorizGlyphConstruction = ( _buildMathGlyphConstruction( glyphMap, horizGlyphVariants, horizGlyphAssembly, ) ) 
return variants def _buildMathGlyphConstruction(glyphMap, variants, assemblies): glyphs = set() if variants: glyphs.update(variants.keys()) if assemblies: glyphs.update(assemblies.keys()) coverage = buildCoverage(glyphs, glyphMap) constructions = [] for glyphName in coverage.glyphs: construction = ot.MathGlyphConstruction() construction.populateDefaults() if variants and glyphName in variants: construction.VariantCount = len(variants[glyphName]) construction.MathGlyphVariantRecord = [] for variantName, advance in variants[glyphName]: record = ot.MathGlyphVariantRecord() record.VariantGlyph = variantName record.AdvanceMeasurement = otRound(advance) construction.MathGlyphVariantRecord.append(record) if assemblies and glyphName in assemblies: parts, ic = assemblies[glyphName] construction.GlyphAssembly = ot.GlyphAssembly() construction.GlyphAssembly.ItalicsCorrection = _mathValueRecord(ic) construction.GlyphAssembly.PartCount = len(parts) construction.GlyphAssembly.PartRecords = [] for part in parts: part_name, flags, start, end, advance = part record = ot.GlyphPartRecord() record.glyph = part_name record.PartFlags = int(flags) record.StartConnectorLength = otRound(start) record.EndConnectorLength = otRound(end) record.FullAdvance = otRound(advance) construction.GlyphAssembly.PartRecords.append(record) constructions.append(construction) return coverage, constructions def _mathValueRecord(value): value_record = ot.MathValueRecord() value_record.Value = otRound(value) return value_record PKaZZZ�-��OOfontTools/otlLib/error.pyclass OpenTypeLibError(Exception): def __init__(self, message, location): Exception.__init__(self, message) self.location = location def __str__(self): message = Exception.__str__(self) if self.location: return f"{self.location}: {message}" else: return message PKaZZZ=A�%s s "fontTools/otlLib/maxContextCalc.py__all__ = ["maxCtxFont"] def maxCtxFont(font): """Calculate the usMaxContext value for an entire font.""" maxCtx = 0 for tag in ("GSUB", "GPOS"): if tag not in font: continue table = font[tag].table if not table.LookupList: continue for lookup in table.LookupList.Lookup: for st in lookup.SubTable: maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st) return maxCtx def maxCtxSubtable(maxCtx, tag, lookupType, st): """Calculate usMaxContext based on a single lookup table (and an existing max value). 
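For example (illustrative), a ``GSUB`` LookupType 4 (ligature) subtable raises the running maximum to the component count of its longest ligature:: maxCtx = maxCtxSubtable(0, "GSUB", 4, st)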
""" # single positioning, single / multiple substitution if (tag == "GPOS" and lookupType == 1) or ( tag == "GSUB" and lookupType in (1, 2, 3) ): maxCtx = max(maxCtx, 1) # pair positioning elif tag == "GPOS" and lookupType == 2: maxCtx = max(maxCtx, 2) # ligatures elif tag == "GSUB" and lookupType == 4: for ligatures in st.ligatures.values(): for ligature in ligatures: maxCtx = max(maxCtx, ligature.CompCount) # context elif (tag == "GPOS" and lookupType == 7) or (tag == "GSUB" and lookupType == 5): maxCtx = maxCtxContextualSubtable(maxCtx, st, "Pos" if tag == "GPOS" else "Sub") # chained context elif (tag == "GPOS" and lookupType == 8) or (tag == "GSUB" and lookupType == 6): maxCtx = maxCtxContextualSubtable( maxCtx, st, "Pos" if tag == "GPOS" else "Sub", "Chain" ) # extensions elif (tag == "GPOS" and lookupType == 9) or (tag == "GSUB" and lookupType == 7): maxCtx = maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable) # reverse-chained context elif tag == "GSUB" and lookupType == 8: maxCtx = maxCtxContextualRule(maxCtx, st, "Reverse") return maxCtx def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""): """Calculate usMaxContext based on a contextual feature subtable.""" if st.Format == 1: for ruleset in getattr(st, "%s%sRuleSet" % (chain, ruleType)): if ruleset is None: continue for rule in getattr(ruleset, "%s%sRule" % (chain, ruleType)): if rule is None: continue maxCtx = maxCtxContextualRule(maxCtx, rule, chain) elif st.Format == 2: for ruleset in getattr(st, "%s%sClassSet" % (chain, ruleType)): if ruleset is None: continue for rule in getattr(ruleset, "%s%sClassRule" % (chain, ruleType)): if rule is None: continue maxCtx = maxCtxContextualRule(maxCtx, rule, chain) elif st.Format == 3: maxCtx = maxCtxContextualRule(maxCtx, st, chain) return maxCtx def maxCtxContextualRule(maxCtx, st, chain): """Calculate usMaxContext based on a contextual feature rule.""" if not chain: return max(maxCtx, st.GlyphCount) elif chain == "Reverse": return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount) return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount) PKaZZZ�����%fontTools/otlLib/optimize/__init__.pyfrom argparse import RawTextHelpFormatter from fontTools.otlLib.optimize.gpos import COMPRESSION_LEVEL, compact from fontTools.ttLib import TTFont def main(args=None): """Optimize the layout tables of an existing font""" from argparse import ArgumentParser from fontTools import configLogger parser = ArgumentParser( prog="otlLib.optimize", description=main.__doc__, formatter_class=RawTextHelpFormatter, ) parser.add_argument("font") parser.add_argument( "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file" ) parser.add_argument( "--gpos-compression-level", help=COMPRESSION_LEVEL.help, default=COMPRESSION_LEVEL.default, choices=list(range(10)), type=int, ) logging_group = parser.add_mutually_exclusive_group(required=False) logging_group.add_argument( "-v", "--verbose", action="store_true", help="Run more verbosely." ) logging_group.add_argument( "-q", "--quiet", action="store_true", help="Turn verbosity off." 
) options = parser.parse_args(args) configLogger( level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") ) font = TTFont(options.font) compact(font, options.gpos_compression_level) font.save(options.outfile or options.font) if __name__ == "__main__": import sys if len(sys.argv) > 1: sys.exit(main()) import doctest sys.exit(doctest.testmod().failed) PKaZZZI ��hh%fontTools/otlLib/optimize/__main__.pyimport sys from fontTools.otlLib.optimize import main if __name__ == "__main__": sys.exit(main()) PKaZZZj*F*H*H!fontTools/otlLib/optimize/gpos.pyimport logging import os from collections import defaultdict, namedtuple from functools import reduce from itertools import chain from math import log2 from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple from fontTools.config import OPTIONS from fontTools.misc.intTools import bit_count, bit_indices from fontTools.ttLib import TTFont from fontTools.ttLib.tables import otBase, otTables log = logging.getLogger(__name__) COMPRESSION_LEVEL = OPTIONS[f"{__name__}:COMPRESSION_LEVEL"] # Kept because ufo2ft depends on it, to be removed once ufo2ft uses the config instead # https://github.com/fonttools/fonttools/issues/2592 GPOS_COMPACT_MODE_ENV_KEY = "FONTTOOLS_GPOS_COMPACT_MODE" GPOS_COMPACT_MODE_DEFAULT = str(COMPRESSION_LEVEL.default) def _compression_level_from_env() -> int: env_level = GPOS_COMPACT_MODE_DEFAULT if GPOS_COMPACT_MODE_ENV_KEY in os.environ: import warnings warnings.warn( f"'{GPOS_COMPACT_MODE_ENV_KEY}' environment variable is deprecated. " "Please set the 'fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL' option " "in TTFont.cfg.", DeprecationWarning, ) env_level = os.environ[GPOS_COMPACT_MODE_ENV_KEY] if len(env_level) == 1 and env_level in "0123456789": return int(env_level) raise ValueError(f"Bad {GPOS_COMPACT_MODE_ENV_KEY}={env_level}") def compact(font: TTFont, level: int) -> TTFont: # Ideal plan: # 1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable # https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable # 2. Extract glyph-glyph kerning and class-kerning from all present subtables # 3. Regroup into different subtable arrangements # 4. Put back into the lookup # # Actual implementation: # 2. Only class kerning is optimized currently # 3. 
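# Two equivalent ways to run the GPOS optimizer defined above; "MyFont.ttf"
# and "out.ttf" are placeholder paths. The command-line form goes through the
# top-level `fonttools` dispatcher:
#
#     $ fonttools otlLib.optimize --gpos-compression-level 9 -o out.ttf MyFont.ttf
#
from fontTools.otlLib.optimize import main
main(["MyFont.ttf", "-o", "out.ttf", "--gpos-compression-level", "9"])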
If the input kerning is already in several subtables, the subtables # are not grouped together first; instead each subtable is treated # independently, so currently this step is: # Split existing subtables into more smaller subtables gpos = font["GPOS"] for lookup in gpos.table.LookupList.Lookup: if lookup.LookupType == 2: compact_lookup(font, level, lookup) elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2: compact_ext_lookup(font, level, lookup) return font def compact_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None: new_subtables = compact_pair_pos(font, level, lookup.SubTable) lookup.SubTable = new_subtables lookup.SubTableCount = len(new_subtables) def compact_ext_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None: new_subtables = compact_pair_pos( font, level, [ext_subtable.ExtSubTable for ext_subtable in lookup.SubTable] ) new_ext_subtables = [] for subtable in new_subtables: ext_subtable = otTables.ExtensionPos() ext_subtable.Format = 1 ext_subtable.ExtSubTable = subtable new_ext_subtables.append(ext_subtable) lookup.SubTable = new_ext_subtables lookup.SubTableCount = len(new_ext_subtables) def compact_pair_pos( font: TTFont, level: int, subtables: Sequence[otTables.PairPos] ) -> Sequence[otTables.PairPos]: new_subtables = [] for subtable in subtables: if subtable.Format == 1: # Not doing anything to Format 1 (yet?) new_subtables.append(subtable) elif subtable.Format == 2: new_subtables.extend(compact_class_pairs(font, level, subtable)) return new_subtables def compact_class_pairs( font: TTFont, level: int, subtable: otTables.PairPos ) -> List[otTables.PairPos]: from fontTools.otlLib.builder import buildPairPosClassesSubtable subtables = [] classes1: DefaultDict[int, List[str]] = defaultdict(list) for g in subtable.Coverage.glyphs: classes1[subtable.ClassDef1.classDefs.get(g, 0)].append(g) classes2: DefaultDict[int, List[str]] = defaultdict(list) for g, i in subtable.ClassDef2.classDefs.items(): classes2[i].append(g) all_pairs = {} for i, class1 in enumerate(subtable.Class1Record): for j, class2 in enumerate(class1.Class2Record): if is_really_zero(class2): continue all_pairs[(tuple(sorted(classes1[i])), tuple(sorted(classes2[j])))] = ( getattr(class2, "Value1", None), getattr(class2, "Value2", None), ) grouped_pairs = cluster_pairs_by_class2_coverage_custom_cost(font, all_pairs, level) for pairs in grouped_pairs: subtables.append(buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap())) return subtables def is_really_zero(class2: otTables.Class2Record) -> bool: v1 = getattr(class2, "Value1", None) v2 = getattr(class2, "Value2", None) return (v1 is None or v1.getEffectiveFormat() == 0) and ( v2 is None or v2.getEffectiveFormat() == 0 ) Pairs = Dict[ Tuple[Tuple[str, ...], Tuple[str, ...]], Tuple[otBase.ValueRecord, otBase.ValueRecord], ] # Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958 def _getClassRanges(glyphIDs: Iterable[int]): glyphIDs = sorted(glyphIDs) last = glyphIDs[0] ranges = [[last]] for glyphID in glyphIDs[1:]: if glyphID != last + 1: ranges[-1].append(last) ranges.append([glyphID]) last = glyphID ranges[-1].append(last) return ranges, glyphIDs[0], glyphIDs[-1] # Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L960-L989 def _classDef_bytes( class_data: List[Tuple[List[Tuple[int, int]], int, int]], class_ids: List[int], 
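# Worked example for _getClassRanges() above: runs of consecutive glyph IDs
# collapse into [first, last] ranges, and the overall min/max IDs are
# returned alongside them.
ranges, min_id, max_id = _getClassRanges([1, 2, 3, 7, 8])
assert ranges == [[1, 3], [7, 8]]
assert (min_id, max_id) == (1, 8)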
coverage=False, ): if not class_ids: return 0 first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]] range_count = len(first_ranges) for i in class_ids[1:]: data = class_data[i] range_count += len(data[0]) min_glyph_id = min(min_glyph_id, data[1]) max_glyph_id = max(max_glyph_id, data[2]) glyphCount = max_glyph_id - min_glyph_id + 1 # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1 format1_bytes = 6 + glyphCount * 2 # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2 format2_bytes = 4 + range_count * 6 return min(format1_bytes, format2_bytes) ClusteringContext = namedtuple( "ClusteringContext", [ "lines", "all_class1", "all_class1_data", "all_class2_data", "valueFormat1_bytes", "valueFormat2_bytes", ], ) class Cluster: # TODO(Python 3.7): Turn this into a dataclass # ctx: ClusteringContext # indices: int # Caches # TODO(Python 3.8): use functools.cached_property instead of the # manually cached properties, and remove the cache fields listed below. # _indices: Optional[List[int]] = None # _column_indices: Optional[List[int]] = None # _cost: Optional[int] = None __slots__ = "ctx", "indices_bitmask", "_indices", "_column_indices", "_cost" def __init__(self, ctx: ClusteringContext, indices_bitmask: int): self.ctx = ctx self.indices_bitmask = indices_bitmask self._indices = None self._column_indices = None self._cost = None @property def indices(self): if self._indices is None: self._indices = bit_indices(self.indices_bitmask) return self._indices @property def column_indices(self): if self._column_indices is None: # Indices of columns that have a 1 in at least 1 line # => binary OR all the lines bitmask = reduce(int.__or__, (self.ctx.lines[i] for i in self.indices)) self._column_indices = bit_indices(bitmask) return self._column_indices @property def width(self): # Add 1 because Class2=0 cannot be used but needs to be encoded. return len(self.column_indices) + 1 @property def cost(self): if self._cost is None: self._cost = ( # 2 bytes to store the offset to this subtable in the Lookup table above 2 # Contents of the subtable # From: https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#pair-adjustment-positioning-format-2-class-pair-adjustment # uint16 posFormat Format identifier: format = 2 + 2 # Offset16 coverageOffset Offset to Coverage table, from beginning of PairPos subtable. + 2 + self.coverage_bytes # uint16 valueFormat1 ValueRecord definition — for the first glyph of the pair (may be zero). + 2 # uint16 valueFormat2 ValueRecord definition — for the second glyph of the pair (may be zero). + 2 # Offset16 classDef1Offset Offset to ClassDef table, from beginning of PairPos subtable — for the first glyph of the pair. + 2 + self.classDef1_bytes # Offset16 classDef2Offset Offset to ClassDef table, from beginning of PairPos subtable — for the second glyph of the pair. + 2 + self.classDef2_bytes # uint16 class1Count Number of classes in classDef1 table — includes Class 0. + 2 # uint16 class2Count Number of classes in classDef2 table — includes Class 0. + 2 # Class1Record class1Records[class1Count] Array of Class1 records, ordered by classes in classDef1. 
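# Worked size comparison for _classDef_bytes() above, with a single class
# covering glyph IDs 10..19 (one contiguous range):
#   ClassDef format 1: 6 header bytes + 2 bytes per glyph = 6 + 10 * 2 = 26
#   ClassDef format 2: 4 header bytes + 6 bytes per range = 4 + 1 * 6 = 10
# so the cheaper format 2 estimate is returned.
class_data = [([[10, 19]], 10, 19)]
assert _classDef_bytes(class_data, [0]) == 10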
+ (self.ctx.valueFormat1_bytes + self.ctx.valueFormat2_bytes) * len(self.indices) * self.width ) return self._cost @property def coverage_bytes(self): format1_bytes = ( # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-1 # uint16 coverageFormat Format identifier — format = 1 # uint16 glyphCount Number of glyphs in the glyph array 4 # uint16 glyphArray[glyphCount] Array of glyph IDs — in numerical order + sum(len(self.ctx.all_class1[i]) for i in self.indices) * 2 ) ranges = sorted( chain.from_iterable(self.ctx.all_class1_data[i][0] for i in self.indices) ) merged_range_count = 0 last = None for start, end in ranges: if last is not None and start != last + 1: merged_range_count += 1 last = end format2_bytes = ( # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-2 # uint16 coverageFormat Format identifier — format = 2 # uint16 rangeCount Number of RangeRecords 4 # RangeRecord rangeRecords[rangeCount] Array of glyph ranges — ordered by startGlyphID. # uint16 startGlyphID First glyph ID in the range # uint16 endGlyphID Last glyph ID in the range # uint16 startCoverageIndex Coverage Index of first glyph ID in range + merged_range_count * 6 ) return min(format1_bytes, format2_bytes) @property def classDef1_bytes(self): # We can skip encoding one of the Class1 definitions, and use # Class1=0 to represent it instead, because Class1 is gated by the # Coverage definition. Use Class1=0 for the highest byte savings. # Going through all options takes too long, pick the biggest class # = what happens in otlLib.builder.ClassDefBuilder.classes() biggest_index = max(self.indices, key=lambda i: len(self.ctx.all_class1[i])) return _classDef_bytes( self.ctx.all_class1_data, [i for i in self.indices if i != biggest_index] ) @property def classDef2_bytes(self): # All Class2 need to be encoded because we can't use Class2=0 return _classDef_bytes(self.ctx.all_class2_data, self.column_indices) def cluster_pairs_by_class2_coverage_custom_cost( font: TTFont, pairs: Pairs, compression: int = 5, ) -> List[Pairs]: if not pairs: # The subtable was actually empty? 
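# The clustering code below encodes each Class1 row as a Python int used as a
# bit vector over Class2 columns, so merging two clusters is a bitwise OR.
# A tiny illustration (column numbers are hypothetical):
from fontTools.misc.intTools import bit_count, bit_indices
row_a = 0b0101          # kerns against Class2 columns 0 and 2
row_b = 0b0110          # kerns against Class2 columns 1 and 2
merged = row_a | row_b  # 0b0111: the merged subtable needs columns 0, 1 and 2
assert bit_indices(merged) == [0, 1, 2]
assert bit_count(merged) == 3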
return [pairs] # Sorted for reproducibility/determinism all_class1 = sorted(set(pair[0] for pair in pairs)) all_class2 = sorted(set(pair[1] for pair in pairs)) # Use Python's big ints for binary vectors representing each line lines = [ sum( 1 << i if (class1, class2) in pairs else 0 for i, class2 in enumerate(all_class2) ) for class1 in all_class1 ] # Map glyph names to ids and work with ints throughout for ClassDef formats name_to_id = font.getReverseGlyphMap() # Each entry in the arrays below is (range_count, min_glyph_id, max_glyph_id) all_class1_data = [ _getClassRanges(name_to_id[name] for name in cls) for cls in all_class1 ] all_class2_data = [ _getClassRanges(name_to_id[name] for name in cls) for cls in all_class2 ] format1 = 0 format2 = 0 for pair, value in pairs.items(): format1 |= value[0].getEffectiveFormat() if value[0] else 0 format2 |= value[1].getEffectiveFormat() if value[1] else 0 valueFormat1_bytes = bit_count(format1) * 2 valueFormat2_bytes = bit_count(format2) * 2 ctx = ClusteringContext( lines, all_class1, all_class1_data, all_class2_data, valueFormat1_bytes, valueFormat2_bytes, ) cluster_cache: Dict[int, Cluster] = {} def make_cluster(indices: int) -> Cluster: cluster = cluster_cache.get(indices, None) if cluster is not None: return cluster cluster = Cluster(ctx, indices) cluster_cache[indices] = cluster return cluster def merge(cluster: Cluster, other: Cluster) -> Cluster: return make_cluster(cluster.indices_bitmask | other.indices_bitmask) # Agglomerative clustering by hand, checking the cost gain of the new # cluster against the previously separate clusters # Start with 1 cluster per line # cluster = set of lines = new subtable clusters = [make_cluster(1 << i) for i in range(len(lines))] # Cost of 1 cluster with everything # `(1 << len) - 1` gives a bitmask full of 1's of length `len` cost_before_splitting = make_cluster((1 << len(lines)) - 1).cost log.debug(f" len(clusters) = {len(clusters)}") while len(clusters) > 1: lowest_cost_change = None best_cluster_index = None best_other_index = None best_merged = None for i, cluster in enumerate(clusters): for j, other in enumerate(clusters[i + 1 :]): merged = merge(cluster, other) cost_change = merged.cost - cluster.cost - other.cost if lowest_cost_change is None or cost_change < lowest_cost_change: lowest_cost_change = cost_change best_cluster_index = i best_other_index = i + 1 + j best_merged = merged assert lowest_cost_change is not None assert best_cluster_index is not None assert best_other_index is not None assert best_merged is not None # If the best merge we found is still taking down the file size, then # there's no question: we must do it, because it's beneficial in both # ways (lower file size and lower number of subtables). However, if the # best merge we found is not reducing file size anymore, then we need to # look at the other stop criteria = the compression factor. if lowest_cost_change > 0: # Stop critera: check whether we should keep merging. # Compute size reduction brought by splitting cost_after_splitting = sum(c.cost for c in clusters) # size_reduction so that after = before * (1 - size_reduction) # E.g. before = 1000, after = 800, 1 - 800/1000 = 0.2 size_reduction = 1 - cost_after_splitting / cost_before_splitting # Force more merging by taking into account the compression number. 
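# A worked instance of the cost test above: if cluster A costs 400 bytes,
# cluster B costs 300 and their union costs 650, then cost_change is
# 650 - 400 - 300 = -50, so the merge saves 50 bytes and is always taken.
# Only merges with a positive cost_change fall through to the compression
# check described next; e.g. with the default compression = 5, a 50% size
# reduction licenses -log2(1 - 0.5) * 5 = 5 additional subtables.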
# Target behaviour: compression number = 1 to 9, default 5 like gzip # - 1 = accept to add 1 subtable to reduce size by 50% # - 5 = accept to add 5 subtables to reduce size by 50% # See https://github.com/harfbuzz/packtab/blob/master/Lib/packTab/__init__.py#L690-L691 # Given the size reduction we have achieved so far, compute how many # new subtables are acceptable. max_new_subtables = -log2(1 - size_reduction) * compression log.debug( f" len(clusters) = {len(clusters):3d} size_reduction={size_reduction:5.2f} max_new_subtables={max_new_subtables}", ) if compression == 9: # Override level 9 to mean: create any number of subtables max_new_subtables = len(clusters) # If we have managed to take the number of new subtables below the # threshold, then we can stop. if len(clusters) <= max_new_subtables + 1: break # No reason to stop yet, do the merge and move on to the next. del clusters[best_other_index] clusters[best_cluster_index] = best_merged # All clusters are final; turn bitmasks back into the "Pairs" format pairs_by_class1: Dict[Tuple[str, ...], Pairs] = defaultdict(dict) for pair, values in pairs.items(): pairs_by_class1[pair[0]][pair] = values pairs_groups: List[Pairs] = [] for cluster in clusters: pairs_group: Pairs = dict() for i in cluster.indices: class1 = all_class1[i] pairs_group.update(pairs_by_class1[class1]) pairs_groups.append(pairs_group) return pairs_groups PKaZZZhXXKKfontTools/pens/__init__.py"""Empty __init__.py file to signal Python this directory is a package.""" PKaZZZ�pk��fontTools/pens/areaPen.py"""Calculate the area of a glyph.""" from fontTools.pens.basePen import BasePen __all__ = ["AreaPen"] class AreaPen(BasePen): def __init__(self, glyphset=None): BasePen.__init__(self, glyphset) self.value = 0 def _moveTo(self, p0): self._p0 = self._startPoint = p0 def _lineTo(self, p1): x0, y0 = self._p0 x1, y1 = p1 self.value -= (x1 - x0) * (y1 + y0) * 0.5 self._p0 = p1 def _qCurveToOne(self, p1, p2): # https://github.com/Pomax/bezierinfo/issues/44 p0 = self._p0 x0, y0 = p0[0], p0[1] x1, y1 = p1[0] - x0, p1[1] - y0 x2, y2 = p2[0] - x0, p2[1] - y0 self.value -= (x2 * y1 - x1 * y2) / 3 self._lineTo(p2) self._p0 = p2 def _curveToOne(self, p1, p2, p3): # https://github.com/Pomax/bezierinfo/issues/44 p0 = self._p0 x0, y0 = p0[0], p0[1] x1, y1 = p1[0] - x0, p1[1] - y0 x2, y2 = p2[0] - x0, p2[1] - y0 x3, y3 = p3[0] - x0, p3[1] - y0 self.value -= (x1 * (-y2 - y3) + x2 * (y1 - 2 * y3) + x3 * (y1 + 2 * y2)) * 0.15 self._lineTo(p3) self._p0 = p3 def _closePath(self): self._lineTo(self._startPoint) del self._p0, self._startPoint def _endPath(self): if self._p0 != self._startPoint: # Area is not defined for open contours. raise NotImplementedError del self._p0, self._startPoint PKaZZZ:i���B�BfontTools/pens/basePen.py"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects. The Pen Protocol A Pen is a kind of object that standardizes the way how to "draw" outlines: it is a middle man between an outline and a drawing. In other words: it is an abstraction for drawing outlines, making sure that outline objects don't need to know the details about how and where they're being drawn, and that drawings don't need to know the details of how outlines are stored. The most basic pattern is this:: outline.draw(pen) # 'outline' draws itself onto 'pen' Pens can be used to render outlines to the screen, but also to construct new outlines. Eg. an outline object can be both a drawable object (it has a draw() method) as well as a pen itself: you *build* an outline using pen methods. 
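# A worked example for AreaPen above: the pen accumulates a signed (shoelace)
# area, so a 10x10 square drawn counter-clockwise yields +100, and the same
# square drawn clockwise would yield -100.
from fontTools.pens.areaPen import AreaPen
pen = AreaPen()
pen.moveTo((0, 0))
pen.lineTo((10, 0))
pen.lineTo((10, 10))
pen.lineTo((0, 10))
pen.closePath()
assert pen.value == 100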
The AbstractPen class defines the Pen protocol. It implements almost nothing (only no-op closePath() and endPath() methods), but is useful for documentation purposes. Subclassing it basically tells the reader: "this class implements the Pen protocol." An example of an AbstractPen subclass is :py:class:`fontTools.pens.transformPen.TransformPen`. The BasePen class is a base implementation useful for pens that actually draw (for example a pen that renders outlines using a native graphics engine). BasePen contains a lot of base functionality, making it very easy to build a pen that fully conforms to the pen protocol. Note that if you subclass BasePen, you *don't* override moveTo(), lineTo(), etc., but _moveTo(), _lineTo(), etc. See the BasePen doc string for details. Examples of BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and fontTools.pens.cocoaPen.CocoaPen. Coordinates are usually expressed as (x, y) tuples, but generally any sequence of length 2 will do. """ from typing import Tuple, Dict from fontTools.misc.loggingTools import LogMixin from fontTools.misc.transform import DecomposedTransform, Identity __all__ = [ "AbstractPen", "NullPen", "BasePen", "PenError", "decomposeSuperBezierSegment", "decomposeQuadraticSegment", ] class PenError(Exception): """Represents an error during penning.""" class OpenContourError(PenError): pass class AbstractPen: def moveTo(self, pt: Tuple[float, float]) -> None: """Begin a new sub path, set the current point to 'pt'. You must end each sub path with a call to pen.closePath() or pen.endPath(). """ raise NotImplementedError def lineTo(self, pt: Tuple[float, float]) -> None: """Draw a straight line from the current point to 'pt'.""" raise NotImplementedError def curveTo(self, *points: Tuple[float, float]) -> None: """Draw a cubic bezier with an arbitrary number of control points. The last point specified is on-curve, all others are off-curve (control) points. If the number of control points is > 2, the segment is split into multiple bezier segments. This works like this: Let n be the number of control points (which is the number of arguments to this call minus 1). If n==2, a plain vanilla cubic bezier is drawn. If n==1, we fall back to a quadratic segment and if n==0 we draw a straight line. It gets interesting when n>2: n-1 PostScript-style cubic segments will be drawn as if they were one curve. See decomposeSuperBezierSegment(). The conversion algorithm used for n>2 is inspired by NURB splines, and is conceptually equivalent to the TrueType "implied points" principle. See also decomposeQuadraticSegment(). """ raise NotImplementedError def qCurveTo(self, *points: Tuple[float, float]) -> None: """Draw a whole string of quadratic curve segments. The last point specified is on-curve, all others are off-curve points. This method implements TrueType-style curves, breaking up curves using 'implied points': between each two consecutive off-curve points, there is one implied point exactly in the middle between them. See also decomposeQuadraticSegment(). The last argument (normally the on-curve point) may be None. This is to support contours that have NO on-curve points (a rarely seen feature of TrueType outlines). """ raise NotImplementedError def closePath(self) -> None: """Close the current sub path. You must call either pen.closePath() or pen.endPath() after each sub path. """ pass def endPath(self) -> None: """End the current sub path, but don't close it. You must call either pen.closePath() or pen.endPath() after each sub path.
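# A minimal sketch of the Pen protocol described above: a straight-segments-
# only pen that collects an SVG-style path string. The class is hypothetical,
# not part of fontTools.
class SVGLinePen(AbstractPen):
    def __init__(self):
        self.commands = []
    def moveTo(self, pt):
        self.commands.append("M %g %g" % pt)
    def lineTo(self, pt):
        self.commands.append("L %g %g" % pt)
    def closePath(self):
        self.commands.append("Z")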
""" pass def addComponent( self, glyphName: str, transformation: Tuple[float, float, float, float, float, float], ) -> None: """Add a sub glyph. The 'transformation' argument must be a 6-tuple containing an affine transformation, or a Transform object from the fontTools.misc.transform module. More precisely: it should be a sequence containing 6 numbers. """ raise NotImplementedError def addVarComponent( self, glyphName: str, transformation: DecomposedTransform, location: Dict[str, float], ) -> None: """Add a VarComponent sub glyph. The 'transformation' argument must be a DecomposedTransform from the fontTools.misc.transform module, and the 'location' argument must be a dictionary mapping axis tags to their locations. """ # GlyphSet decomposes for us raise AttributeError class NullPen(AbstractPen): """A pen that does nothing.""" def moveTo(self, pt): pass def lineTo(self, pt): pass def curveTo(self, *points): pass def qCurveTo(self, *points): pass def closePath(self): pass def endPath(self): pass def addComponent(self, glyphName, transformation): pass def addVarComponent(self, glyphName, transformation, location): pass class LoggingPen(LogMixin, AbstractPen): """A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)""" pass class MissingComponentError(KeyError): """Indicates a component pointing to a non-existent glyph in the glyphset.""" class DecomposingPen(LoggingPen): """Implements a 'addComponent' method that decomposes components (i.e. draws them onto self as simple contours). It can also be used as a mixin class (e.g. see ContourRecordingPen). You must override moveTo, lineTo, curveTo and qCurveTo. You may additionally override closePath, endPath and addComponent. By default a warning message is logged when a base glyph is missing; set the class variable ``skipMissingComponents`` to False if you want all instances of a sub-class to raise a :class:`MissingComponentError` exception by default. """ skipMissingComponents = True # alias error for convenience MissingComponentError = MissingComponentError def __init__( self, glyphSet, *args, skipMissingComponents=None, reverseFlipped=False, **kwargs, ): """Takes a 'glyphSet' argument (dict), in which the glyphs that are referenced as components are looked up by their name. If the optional 'reverseFlipped' argument is True, components whose transformation matrix has a negative determinant will be decomposed with a reversed path direction to compensate for the flip. The optional 'skipMissingComponents' argument can be set to True/False to override the homonymous class attribute for a given pen instance. 
""" super(DecomposingPen, self).__init__(*args, **kwargs) self.glyphSet = glyphSet self.skipMissingComponents = ( self.__class__.skipMissingComponents if skipMissingComponents is None else skipMissingComponents ) self.reverseFlipped = reverseFlipped def addComponent(self, glyphName, transformation): """Transform the points of the base glyph and draw it onto self.""" from fontTools.pens.transformPen import TransformPen try: glyph = self.glyphSet[glyphName] except KeyError: if not self.skipMissingComponents: raise MissingComponentError(glyphName) self.log.warning("glyph '%s' is missing from glyphSet; skipped" % glyphName) else: pen = self if transformation != Identity: pen = TransformPen(pen, transformation) if self.reverseFlipped: # if the transformation has a negative determinant, it will # reverse the contour direction of the component a, b, c, d = transformation[:4] det = a * d - b * c if det < 0: from fontTools.pens.reverseContourPen import ReverseContourPen pen = ReverseContourPen(pen) glyph.draw(pen) def addVarComponent(self, glyphName, transformation, location): # GlyphSet decomposes for us raise AttributeError class BasePen(DecomposingPen): """Base class for drawing pens. You must override _moveTo, _lineTo and _curveToOne. You may additionally override _closePath, _endPath, addComponent, addVarComponent, and/or _qCurveToOne. You should not override any other methods. """ def __init__(self, glyphSet=None): super(BasePen, self).__init__(glyphSet) self.__currentPoint = None # must override def _moveTo(self, pt): raise NotImplementedError def _lineTo(self, pt): raise NotImplementedError def _curveToOne(self, pt1, pt2, pt3): raise NotImplementedError # may override def _closePath(self): pass def _endPath(self): pass def _qCurveToOne(self, pt1, pt2): """This method implements the basic quadratic curve type. The default implementation delegates the work to the cubic curve function. Optionally override with a native implementation. """ pt0x, pt0y = self.__currentPoint pt1x, pt1y = pt1 pt2x, pt2y = pt2 mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x) mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y) mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x) mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) # don't override def _getCurrentPoint(self): """Return the current point. This is not part of the public interface, yet is useful for subclasses. """ return self.__currentPoint def closePath(self): self._closePath() self.__currentPoint = None def endPath(self): self._endPath() self.__currentPoint = None def moveTo(self, pt): self._moveTo(pt) self.__currentPoint = pt def lineTo(self, pt): self._lineTo(pt) self.__currentPoint = pt def curveTo(self, *points): n = len(points) - 1 # 'n' is the number of control points assert n >= 0 if n == 2: # The common case, we have exactly two BCP's, so this is a standard # cubic bezier. Even though decomposeSuperBezierSegment() handles # this case just fine, we special-case it anyway since it's so # common. self._curveToOne(*points) self.__currentPoint = points[-1] elif n > 2: # n is the number of control points; split curve into n-1 cubic # bezier segments. The algorithm used here is inspired by NURB # splines and the TrueType "implied point" principle, and ensures # the smoothest possible connection between two curve segments, # with no disruption in the curvature. It is practical since it # allows one to construct multiple bezier segments with a much # smaller amount of points. 
_curveToOne = self._curveToOne for pt1, pt2, pt3 in decomposeSuperBezierSegment(points): _curveToOne(pt1, pt2, pt3) self.__currentPoint = pt3 elif n == 1: self.qCurveTo(*points) elif n == 0: self.lineTo(points[0]) else: raise AssertionError("can't get there from here") def qCurveTo(self, *points): n = len(points) - 1 # 'n' is the number of control points assert n >= 0 if points[-1] is None: # Special case for TrueType quadratics: it is possible to # define a contour with NO on-curve points. BasePen supports # this by allowing the final argument (the expected on-curve # point) to be None. We simulate the feature by making the implied # on-curve point between the last and the first off-curve points # explicit. x, y = points[-2] # last off-curve point nx, ny = points[0] # first off-curve point impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny)) self.__currentPoint = impliedStartPoint self._moveTo(impliedStartPoint) points = points[:-1] + (impliedStartPoint,) if n > 0: # Split the string of points into discrete quadratic curve # segments. Between any two consecutive off-curve points # there's an implied on-curve point exactly in the middle. # This is where the segment splits. _qCurveToOne = self._qCurveToOne for pt1, pt2 in decomposeQuadraticSegment(points): _qCurveToOne(pt1, pt2) self.__currentPoint = pt2 else: self.lineTo(points[0]) def decomposeSuperBezierSegment(points): """Split the SuperBezier described by 'points' into a list of regular bezier segments. The 'points' argument must be a sequence with length 3 or greater, containing (x, y) coordinates. The last point is the destination on-curve point, the rest of the points are off-curve points. The start point should not be supplied. This function returns a list of (pt1, pt2, pt3) tuples, which each specify a regular curveto-style bezier segment. """ n = len(points) - 1 assert n > 1 bezierSegments = [] pt1, pt2, pt3 = points[0], None, None for i in range(2, n + 1): # calculate points in between control points. nDivisions = min(i, 3, n - i + 2) for j in range(1, nDivisions): factor = j / nDivisions temp1 = points[i - 1] temp2 = points[i - 2] temp = ( temp2[0] + factor * (temp1[0] - temp2[0]), temp2[1] + factor * (temp1[1] - temp2[1]), ) if pt2 is None: pt2 = temp else: pt3 = (0.5 * (pt2[0] + temp[0]), 0.5 * (pt2[1] + temp[1])) bezierSegments.append((pt1, pt2, pt3)) pt1, pt2, pt3 = temp, None, None bezierSegments.append((pt1, points[-2], points[-1])) return bezierSegments def decomposeQuadraticSegment(points): """Split the quadratic curve segment described by 'points' into a list of "atomic" quadratic segments. The 'points' argument must be a sequence with length 2 or greater, containing (x, y) coordinates. The last point is the destination on-curve point, the rest of the points are off-curve points. The start point should not be supplied. This function returns a list of (pt1, pt2) tuples, which each specify a plain quadratic bezier segment. 
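# Doctest-style check of decomposeSuperBezierSegment() above: a segment with
# three control points splits into two plain cubic segments.
segments = decomposeSuperBezierSegment([(0, 4), (4, 8), (8, 4), (8, 0)])
assert len(segments) == 2
assert segments[0] == ((0, 4), (2, 6), (4, 6))
assert segments[1] == ((6, 6), (8, 4), (8, 0))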
""" n = len(points) - 1 assert n > 0 quadSegments = [] for i in range(n - 1): x, y = points[i] nx, ny = points[i + 1] impliedPt = (0.5 * (x + nx), 0.5 * (y + ny)) quadSegments.append((points[i], impliedPt)) quadSegments.append((points[-2], points[-1])) return quadSegments class _TestPen(BasePen): """Test class that prints PostScript to stdout.""" def _moveTo(self, pt): print("%s %s moveto" % (pt[0], pt[1])) def _lineTo(self, pt): print("%s %s lineto" % (pt[0], pt[1])) def _curveToOne(self, bcp1, bcp2, pt): print( "%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1]) ) def _closePath(self): print("closepath") if __name__ == "__main__": pen = _TestPen(None) pen.moveTo((0, 0)) pen.lineTo((0, 100)) pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) pen.closePath() pen = _TestPen(None) # testing the "no on-curve point" scenario pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None) pen.closePath() PKaZZZ0܋{9 9 fontTools/pens/boundsPen.pyfrom fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds from fontTools.pens.basePen import BasePen __all__ = ["BoundsPen", "ControlBoundsPen"] class ControlBoundsPen(BasePen): """Pen to calculate the "control bounds" of a shape. This is the bounding box of all control points, so may be larger than the actual bounding box if there are curves that don't have points on their extremes. When the shape has been drawn, the bounds are available as the ``bounds`` attribute of the pen object. It's a 4-tuple:: (xMin, yMin, xMax, yMax). If ``ignoreSinglePoints`` is True, single points are ignored. """ def __init__(self, glyphSet, ignoreSinglePoints=False): BasePen.__init__(self, glyphSet) self.ignoreSinglePoints = ignoreSinglePoints self.init() def init(self): self.bounds = None self._start = None def _moveTo(self, pt): self._start = pt if not self.ignoreSinglePoints: self._addMoveTo() def _addMoveTo(self): if self._start is None: return bounds = self.bounds if bounds: self.bounds = updateBounds(bounds, self._start) else: x, y = self._start self.bounds = (x, y, x, y) self._start = None def _lineTo(self, pt): self._addMoveTo() self.bounds = updateBounds(self.bounds, pt) def _curveToOne(self, bcp1, bcp2, pt): self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, bcp1) bounds = updateBounds(bounds, bcp2) bounds = updateBounds(bounds, pt) self.bounds = bounds def _qCurveToOne(self, bcp, pt): self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, bcp) bounds = updateBounds(bounds, pt) self.bounds = bounds class BoundsPen(ControlBoundsPen): """Pen to calculate the bounds of a shape. It calculates the correct bounds even when the shape contains curves that don't have points on their extremes. This is somewhat slower to compute than the "control bounds". When the shape has been drawn, the bounds are available as the ``bounds`` attribute of the pen object. 
It's a 4-tuple:: (xMin, yMin, xMax, yMax) """ def _curveToOne(self, bcp1, bcp2, pt): self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, pt) if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): bounds = unionRect( bounds, calcCubicBounds(self._getCurrentPoint(), bcp1, bcp2, pt) ) self.bounds = bounds def _qCurveToOne(self, bcp, pt): self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, pt) if not pointInRect(bcp, bounds): bounds = unionRect( bounds, calcQuadraticBounds(self._getCurrentPoint(), bcp, pt) ) self.bounds = bounds PKaZZZm ��PPfontTools/pens/cairoPen.py"""Pen to draw to a Cairo graphics library context.""" from fontTools.pens.basePen import BasePen __all__ = ["CairoPen"] class CairoPen(BasePen): """Pen to draw to a Cairo graphics library context.""" def __init__(self, glyphSet, context): BasePen.__init__(self, glyphSet) self.context = context def _moveTo(self, p): self.context.move_to(*p) def _lineTo(self, p): self.context.line_to(*p) def _curveToOne(self, p1, p2, p3): self.context.curve_to(*p1, *p2, *p3) def _closePath(self): self.context.close_path() PKaZZZ5^��ddfontTools/pens/cocoaPen.pyfrom fontTools.pens.basePen import BasePen __all__ = ["CocoaPen"] class CocoaPen(BasePen): def __init__(self, glyphSet, path=None): BasePen.__init__(self, glyphSet) if path is None: from AppKit import NSBezierPath path = NSBezierPath.bezierPath() self.path = path def _moveTo(self, p): self.path.moveToPoint_(p) def _lineTo(self, p): self.path.lineToPoint_(p) def _curveToOne(self, p1, p2, p3): self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2) def _closePath(self): self.path.closePath() PKaZZZ۸�g�2�2fontTools/pens/cu2quPen.py# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import operator from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic from fontTools.pens.basePen import decomposeSuperBezierSegment from fontTools.pens.filterPen import FilterPen from fontTools.pens.reverseContourPen import ReverseContourPen from fontTools.pens.pointPen import BasePointToSegmentPen from fontTools.pens.pointPen import ReverseContourPointPen class Cu2QuPen(FilterPen): """A filter pen to convert cubic bezier curves to quadratic b-splines using the FontTools SegmentPen protocol. Args: other_pen: another SegmentPen used to draw the transformed outline. max_err: maximum approximation error in font units. For optimal results, if you know the UPEM of the font, we recommend setting this to a value equal, or close to UPEM / 1000. reverse_direction: flip the contours' direction but keep starting point. stats: a dictionary counting the point numbers of quadratic segments. all_quadratic: if True (default), only quadratic b-splines are generated. if False, quadratic curves or cubic curves are generated depending on which one is more economical. 
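# Side-by-side sketch of ControlBoundsPen vs BoundsPen above, drawing one
# quadratic whose control point overshoots the true extremum (y = 50 at
# t = 0.5):
from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen
for cls in (ControlBoundsPen, BoundsPen):
    pen = cls(None)
    pen.moveTo((0, 0))
    pen.qCurveTo((50, 100), (100, 0))
    pen.endPath()
    print(cls.__name__, pen.bounds)
# Expected values (modulo int/float repr):
#   ControlBoundsPen (0, 0, 100, 100)  -- the raw control point is included
#   BoundsPen (0, 0, 100, 50)          -- the actual curve extremum is used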
""" def __init__( self, other_pen, max_err, reverse_direction=False, stats=None, all_quadratic=True, ): if reverse_direction: other_pen = ReverseContourPen(other_pen) super().__init__(other_pen) self.max_err = max_err self.stats = stats self.all_quadratic = all_quadratic def _convert_curve(self, pt1, pt2, pt3): curve = (self.current_pt, pt1, pt2, pt3) result = curve_to_quadratic(curve, self.max_err, self.all_quadratic) if self.stats is not None: n = str(len(result) - 2) self.stats[n] = self.stats.get(n, 0) + 1 if self.all_quadratic: self.qCurveTo(*result[1:]) else: if len(result) == 3: self.qCurveTo(*result[1:]) else: assert len(result) == 4 super().curveTo(*result[1:]) def curveTo(self, *points): n = len(points) if n == 3: # this is the most common case, so we special-case it self._convert_curve(*points) elif n > 3: for segment in decomposeSuperBezierSegment(points): self._convert_curve(*segment) else: self.qCurveTo(*points) class Cu2QuPointPen(BasePointToSegmentPen): """A filter pen to convert cubic bezier curves to quadratic b-splines using the FontTools PointPen protocol. Args: other_point_pen: another PointPen used to draw the transformed outline. max_err: maximum approximation error in font units. For optimal results, if you know the UPEM of the font, we recommend setting this to a value equal, or close to UPEM / 1000. reverse_direction: reverse the winding direction of all contours. stats: a dictionary counting the point numbers of quadratic segments. all_quadratic: if True (default), only quadratic b-splines are generated. if False, quadratic curves or cubic curves are generated depending on which one is more economical. """ __points_required = { "move": (1, operator.eq), "line": (1, operator.eq), "qcurve": (2, operator.ge), "curve": (3, operator.eq), } def __init__( self, other_point_pen, max_err, reverse_direction=False, stats=None, all_quadratic=True, ): BasePointToSegmentPen.__init__(self) if reverse_direction: self.pen = ReverseContourPointPen(other_point_pen) else: self.pen = other_point_pen self.max_err = max_err self.stats = stats self.all_quadratic = all_quadratic def _flushContour(self, segments): assert len(segments) >= 1 closed = segments[0][0] != "move" new_segments = [] prev_points = segments[-1][1] prev_on_curve = prev_points[-1][0] for segment_type, points in segments: if segment_type == "curve": for sub_points in self._split_super_bezier_segments(points): on_curve, smooth, name, kwargs = sub_points[-1] bcp1, bcp2 = sub_points[0][0], sub_points[1][0] cubic = [prev_on_curve, bcp1, bcp2, on_curve] quad = curve_to_quadratic(cubic, self.max_err, self.all_quadratic) if self.stats is not None: n = str(len(quad) - 2) self.stats[n] = self.stats.get(n, 0) + 1 new_points = [(pt, False, None, {}) for pt in quad[1:-1]] new_points.append((on_curve, smooth, name, kwargs)) if self.all_quadratic or len(new_points) == 2: new_segments.append(["qcurve", new_points]) else: new_segments.append(["curve", new_points]) prev_on_curve = sub_points[-1][0] else: new_segments.append([segment_type, points]) prev_on_curve = points[-1][0] if closed: # the BasePointToSegmentPen.endPath method that calls _flushContour # rotates the point list of closed contours so that they end with # the first on-curve point. We restore the original starting point. 
new_segments = new_segments[-1:] + new_segments[:-1] self._drawPoints(new_segments) def _split_super_bezier_segments(self, points): sub_segments = [] # n is the number of control points n = len(points) - 1 if n == 2: # a simple bezier curve segment sub_segments.append(points) elif n > 2: # a "super" bezier; decompose it on_curve, smooth, name, kwargs = points[-1] num_sub_segments = n - 1 for i, sub_points in enumerate( decomposeSuperBezierSegment([pt for pt, _, _, _ in points]) ): new_segment = [] for point in sub_points[:-1]: new_segment.append((point, False, None, {})) if i == (num_sub_segments - 1): # the last on-curve keeps its original attributes new_segment.append((on_curve, smooth, name, kwargs)) else: # on-curves of sub-segments are always "smooth" new_segment.append((sub_points[-1], True, None, {})) sub_segments.append(new_segment) else: raise AssertionError("expected 2 control points, found: %d" % n) return sub_segments def _drawPoints(self, segments): pen = self.pen pen.beginPath() last_offcurves = [] points_required = self.__points_required for i, (segment_type, points) in enumerate(segments): if segment_type in points_required: n, op = points_required[segment_type] assert op(len(points), n), ( f"illegal {segment_type!r} segment point count: " f"expected {n}, got {len(points)}" ) offcurves = points[:-1] if i == 0: # any off-curve points preceding the first on-curve # will be appended at the end of the contour last_offcurves = offcurves else: for pt, smooth, name, kwargs in offcurves: pen.addPoint(pt, None, smooth, name, **kwargs) pt, smooth, name, kwargs = points[-1] if pt is None: assert segment_type == "qcurve" # special quadratic contour with no on-curve points: # we need to skip the "None" point. See also the Pen # protocol's qCurveTo() method and fontTools.pens.basePen pass else: pen.addPoint(pt, segment_type, smooth, name, **kwargs) else: raise AssertionError("unexpected segment type: %r" % segment_type) for pt, smooth, name, kwargs in last_offcurves: pen.addPoint(pt, None, smooth, name, **kwargs) pen.endPath() def addComponent(self, baseGlyphName, transformation): assert self.currentPath is None self.pen.addComponent(baseGlyphName, transformation) class Cu2QuMultiPen: """A filter multi-pen to convert cubic bezier curves to quadratic b-splines in a interpolation-compatible manner, using the FontTools SegmentPen protocol. Args: other_pens: list of SegmentPens used to draw the transformed outlines. max_err: maximum approximation error in font units. For optimal results, if you know the UPEM of the font, we recommend setting this to a value equal, or close to UPEM / 1000. reverse_direction: flip the contours' direction but keep starting point. This pen does not follow the normal SegmentPen protocol. Instead, its moveTo/lineTo/qCurveTo/curveTo methods take a list of tuples that are arguments that would normally be passed to a SegmentPen, one item for each of the pens in other_pens. 
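# A sketch of the list-of-arguments protocol described above, converting two
# point-compatible masters in lockstep (all coordinates are hypothetical):
from fontTools.pens.recordingPen import RecordingPen
recs = [RecordingPen(), RecordingPen()]
pen = Cu2QuMultiPen(recs, max_err=1.0)
pen.moveTo([((0, 0),), ((0, 10),)])  # one tuple of arguments per master
pen.curveTo([((10, 20), (20, 20), (30, 0)), ((10, 30), (20, 30), (30, 10))])
pen.closePath()
# Both recordings now hold the same number of quadratic points, keeping the
# masters interpolation-compatible.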
""" # TODO Simplify like 3e8ebcdce592fe8a59ca4c3a294cc9724351e1ce # Remove start_pts and _add_moveTO def __init__(self, other_pens, max_err, reverse_direction=False): if reverse_direction: other_pens = [ ReverseContourPen(pen, outputImpliedClosingLine=True) for pen in other_pens ] self.pens = other_pens self.max_err = max_err self.start_pts = None self.current_pts = None def _check_contour_is_open(self): if self.current_pts is None: raise AssertionError("moveTo is required") def _check_contour_is_closed(self): if self.current_pts is not None: raise AssertionError("closePath or endPath is required") def _add_moveTo(self): if self.start_pts is not None: for pt, pen in zip(self.start_pts, self.pens): pen.moveTo(*pt) self.start_pts = None def moveTo(self, pts): self._check_contour_is_closed() self.start_pts = self.current_pts = pts self._add_moveTo() def lineTo(self, pts): self._check_contour_is_open() self._add_moveTo() for pt, pen in zip(pts, self.pens): pen.lineTo(*pt) self.current_pts = pts def qCurveTo(self, pointsList): self._check_contour_is_open() if len(pointsList[0]) == 1: self.lineTo([(points[0],) for points in pointsList]) return self._add_moveTo() current_pts = [] for points, pen in zip(pointsList, self.pens): pen.qCurveTo(*points) current_pts.append((points[-1],)) self.current_pts = current_pts def _curves_to_quadratic(self, pointsList): curves = [] for current_pt, points in zip(self.current_pts, pointsList): curves.append(current_pt + points) quadratics = curves_to_quadratic(curves, [self.max_err] * len(curves)) pointsList = [] for quadratic in quadratics: pointsList.append(quadratic[1:]) self.qCurveTo(pointsList) def curveTo(self, pointsList): self._check_contour_is_open() self._curves_to_quadratic(pointsList) def closePath(self): self._check_contour_is_open() if self.start_pts is None: for pen in self.pens: pen.closePath() self.current_pts = self.start_pts = None def endPath(self): self._check_contour_is_open() if self.start_pts is None: for pen in self.pens: pen.endPath() self.current_pts = self.start_pts = None def addComponent(self, glyphName, transformations): self._check_contour_is_closed() for trans, pen in zip(transformations, self.pens): pen.addComponent(glyphName, trans) PKaZZZs�?D� � (fontTools/pens/explicitClosingLinePen.pyfrom fontTools.pens.filterPen import ContourFilterPen class ExplicitClosingLinePen(ContourFilterPen): """A filter pen that adds an explicit lineTo to the first point of each closed contour if the end point of the last segment is not already the same as the first point. Otherwise, it passes the contour through unchanged. 
>>> from pprint import pprint >>> from fontTools.pens.recordingPen import RecordingPen >>> rec = RecordingPen() >>> pen = ExplicitClosingLinePen(rec) >>> pen.moveTo((0, 0)) >>> pen.lineTo((100, 0)) >>> pen.lineTo((100, 100)) >>> pen.closePath() >>> pprint(rec.value) [('moveTo', ((0, 0),)), ('lineTo', ((100, 0),)), ('lineTo', ((100, 100),)), ('lineTo', ((0, 0),)), ('closePath', ())] >>> rec = RecordingPen() >>> pen = ExplicitClosingLinePen(rec) >>> pen.moveTo((0, 0)) >>> pen.lineTo((100, 0)) >>> pen.lineTo((100, 100)) >>> pen.lineTo((0, 0)) >>> pen.closePath() >>> pprint(rec.value) [('moveTo', ((0, 0),)), ('lineTo', ((100, 0),)), ('lineTo', ((100, 100),)), ('lineTo', ((0, 0),)), ('closePath', ())] >>> rec = RecordingPen() >>> pen = ExplicitClosingLinePen(rec) >>> pen.moveTo((0, 0)) >>> pen.curveTo((100, 0), (0, 100), (100, 100)) >>> pen.closePath() >>> pprint(rec.value) [('moveTo', ((0, 0),)), ('curveTo', ((100, 0), (0, 100), (100, 100))), ('lineTo', ((0, 0),)), ('closePath', ())] >>> rec = RecordingPen() >>> pen = ExplicitClosingLinePen(rec) >>> pen.moveTo((0, 0)) >>> pen.curveTo((100, 0), (0, 100), (100, 100)) >>> pen.lineTo((0, 0)) >>> pen.closePath() >>> pprint(rec.value) [('moveTo', ((0, 0),)), ('curveTo', ((100, 0), (0, 100), (100, 100))), ('lineTo', ((0, 0),)), ('closePath', ())] >>> rec = RecordingPen() >>> pen = ExplicitClosingLinePen(rec) >>> pen.moveTo((0, 0)) >>> pen.curveTo((100, 0), (0, 100), (0, 0)) >>> pen.closePath() >>> pprint(rec.value) [('moveTo', ((0, 0),)), ('curveTo', ((100, 0), (0, 100), (0, 0))), ('closePath', ())] >>> rec = RecordingPen() >>> pen = ExplicitClosingLinePen(rec) >>> pen.moveTo((0, 0)) >>> pen.closePath() >>> pprint(rec.value) [('moveTo', ((0, 0),)), ('closePath', ())] >>> rec = RecordingPen() >>> pen = ExplicitClosingLinePen(rec) >>> pen.closePath() >>> pprint(rec.value) [('closePath', ())] >>> rec = RecordingPen() >>> pen = ExplicitClosingLinePen(rec) >>> pen.moveTo((0, 0)) >>> pen.lineTo((100, 0)) >>> pen.lineTo((100, 100)) >>> pen.endPath() >>> pprint(rec.value) [('moveTo', ((0, 0),)), ('lineTo', ((100, 0),)), ('lineTo', ((100, 100),)), ('endPath', ())] """ def filterContour(self, contour): if ( not contour or contour[0][0] != "moveTo" or contour[-1][0] != "closePath" or len(contour) < 3 ): return movePt = contour[0][1][0] lastSeg = contour[-2][1] if lastSeg and movePt != lastSeg[-1]: contour[-1:] = [("lineTo", (movePt,)), ("closePath", ())] PKaZZZw��CnnfontTools/pens/filterPen.pyfrom __future__ import annotations from fontTools.pens.basePen import AbstractPen, DecomposingPen from fontTools.pens.pointPen import AbstractPointPen, DecomposingPointPen from fontTools.pens.recordingPen import RecordingPen class _PassThruComponentsMixin(object): def addComponent(self, glyphName, transformation, **kwargs): self._outPen.addComponent(glyphName, transformation, **kwargs) class FilterPen(_PassThruComponentsMixin, AbstractPen): """Base class for pens that apply some transformation to the coordinates they receive and pass them to another pen. You can override any of its methods. The default implementation does nothing, but passes the commands unmodified to the other pen. 
>>> from fontTools.pens.recordingPen import RecordingPen >>> rec = RecordingPen() >>> pen = FilterPen(rec) >>> v = iter(rec.value) >>> pen.moveTo((0, 0)) >>> next(v) ('moveTo', ((0, 0),)) >>> pen.lineTo((1, 1)) >>> next(v) ('lineTo', ((1, 1),)) >>> pen.curveTo((2, 2), (3, 3), (4, 4)) >>> next(v) ('curveTo', ((2, 2), (3, 3), (4, 4))) >>> pen.qCurveTo((5, 5), (6, 6), (7, 7), (8, 8)) >>> next(v) ('qCurveTo', ((5, 5), (6, 6), (7, 7), (8, 8))) >>> pen.closePath() >>> next(v) ('closePath', ()) >>> pen.moveTo((9, 9)) >>> next(v) ('moveTo', ((9, 9),)) >>> pen.endPath() >>> next(v) ('endPath', ()) >>> pen.addComponent('foo', (1, 0, 0, 1, 0, 0)) >>> next(v) ('addComponent', ('foo', (1, 0, 0, 1, 0, 0))) """ def __init__(self, outPen): self._outPen = outPen self.current_pt = None def moveTo(self, pt): self._outPen.moveTo(pt) self.current_pt = pt def lineTo(self, pt): self._outPen.lineTo(pt) self.current_pt = pt def curveTo(self, *points): self._outPen.curveTo(*points) self.current_pt = points[-1] def qCurveTo(self, *points): self._outPen.qCurveTo(*points) self.current_pt = points[-1] def closePath(self): self._outPen.closePath() self.current_pt = None def endPath(self): self._outPen.endPath() self.current_pt = None class ContourFilterPen(_PassThruComponentsMixin, RecordingPen): """A "buffered" filter pen that accumulates contour data, passes it through a ``filterContour`` method when the contour is closed or ended, and finally draws the result with the output pen. Components are passed through unchanged. """ def __init__(self, outPen): super(ContourFilterPen, self).__init__() self._outPen = outPen def closePath(self): super(ContourFilterPen, self).closePath() self._flushContour() def endPath(self): super(ContourFilterPen, self).endPath() self._flushContour() def _flushContour(self): result = self.filterContour(self.value) if result is not None: self.value = result self.replay(self._outPen) self.value = [] def filterContour(self, contour): """Subclasses must override this to perform the filtering. The contour is a list of pen (operator, operands) tuples. Operators are strings corresponding to the AbstractPen methods: "moveTo", "lineTo", "curveTo", "qCurveTo", "closePath" and "endPath". The operands are the positional arguments that are passed to each method. If the method doesn't return a value (i.e. returns None), it's assumed that the argument was modified in-place. Otherwise, the return value is drawn with the output pen. """ return # or return contour class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen): """Baseclass for point pens that apply some transformation to the coordinates they receive and pass them to another point pen. You can override any of its methods. The default implementation does nothing, but passes the commands unmodified to the other pen. 
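# A minimal FilterPen subclass along the lines described above, rounding
# coordinates before forwarding them. The class is a hypothetical sketch;
# fontTools ships a fuller rounding pen in fontTools.pens.roundingPen.
class IntCoordFilterPen(FilterPen):
    def moveTo(self, pt):
        super().moveTo((round(pt[0]), round(pt[1])))
    def lineTo(self, pt):
        super().lineTo((round(pt[0]), round(pt[1])))
    def curveTo(self, *points):
        super().curveTo(*[(round(x), round(y)) for x, y in points])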
>>> from fontTools.pens.recordingPen import RecordingPointPen >>> rec = RecordingPointPen() >>> pen = FilterPointPen(rec) >>> v = iter(rec.value) >>> pen.beginPath(identifier="abc") >>> next(v) ('beginPath', (), {'identifier': 'abc'}) >>> pen.addPoint((1, 2), "line", False) >>> next(v) ('addPoint', ((1, 2), 'line', False, None), {}) >>> pen.addComponent("a", (2, 0, 0, 2, 10, -10), identifier="0001") >>> next(v) ('addComponent', ('a', (2, 0, 0, 2, 10, -10)), {'identifier': '0001'}) >>> pen.endPath() >>> next(v) ('endPath', (), {}) """ def __init__(self, outPen): self._outPen = outPen def beginPath(self, **kwargs): self._outPen.beginPath(**kwargs) def endPath(self): self._outPen.endPath() def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs) class _DecomposingFilterPenMixin: """Mixin class that decomposes components as regular contours. Shared by both DecomposingFilterPen and DecomposingFilterPointPen. Takes two required parameters, another (segment or point) pen 'outPen' to draw with, and a 'glyphSet' dict of drawable glyph objects to draw components from. The 'skipMissingComponents' and 'reverseFlipped' optional arguments work the same as in the DecomposingPen/DecomposingPointPen. Both are False by default. In addition, the decomposing filter pens also take the following two options: 'include' is an optional set of component base glyph names to consider for decomposition; the default include=None means decompose all components no matter the base glyph name. 'decomposeNested' (bool) controls whether to recurse decomposition into nested components of components (this only matters when 'include' was also provided); if False, only decompose top-level components included in the set, but not also their children.
""" # raises MissingComponentError if base glyph is not found in glyphSet skipMissingComponents = False def __init__( self, outPen, glyphSet, skipMissingComponents=None, reverseFlipped=False, include: set[str] | None = None, decomposeNested: bool = True, ): super().__init__( outPen=outPen, glyphSet=glyphSet, skipMissingComponents=skipMissingComponents, reverseFlipped=reverseFlipped, ) self.include = include self.decomposeNested = decomposeNested def addComponent(self, baseGlyphName, transformation, **kwargs): # only decompose the component if it's included in the set if self.include is None or baseGlyphName in self.include: # if we're decomposing nested components, temporarily set include to None include_bak = self.include if self.decomposeNested and self.include: self.include = None try: super().addComponent(baseGlyphName, transformation, **kwargs) finally: if self.include != include_bak: self.include = include_bak else: _PassThruComponentsMixin.addComponent( self, baseGlyphName, transformation, **kwargs ) class DecomposingFilterPen(_DecomposingFilterPenMixin, DecomposingPen, FilterPen): """Filter pen that draws components as regular contours.""" pass class DecomposingFilterPointPen( _DecomposingFilterPenMixin, DecomposingPointPen, FilterPointPen ): """Filter point pen that draws components as regular contours.""" pass PKaZZZ�ҊeMeMfontTools/pens/freetypePen.py# -*- coding: utf-8 -*- """Pen to rasterize paths with FreeType.""" __all__ = ["FreeTypePen"] import os import ctypes import platform import subprocess import collections import math import freetype from freetype.raw import FT_Outline_Get_Bitmap, FT_Outline_Get_BBox, FT_Outline_Get_CBox from freetype.ft_types import FT_Pos from freetype.ft_structs import FT_Vector, FT_BBox, FT_Bitmap, FT_Outline from freetype.ft_enums import ( FT_OUTLINE_NONE, FT_OUTLINE_EVEN_ODD_FILL, FT_PIXEL_MODE_GRAY, FT_CURVE_TAG_ON, FT_CURVE_TAG_CONIC, FT_CURVE_TAG_CUBIC, ) from freetype.ft_errors import FT_Exception from fontTools.pens.basePen import BasePen, PenError from fontTools.misc.roundTools import otRound from fontTools.misc.transform import Transform Contour = collections.namedtuple("Contour", ("points", "tags")) class FreeTypePen(BasePen): """Pen to rasterize paths with FreeType. Requires `freetype-py` module. Constructs ``FT_Outline`` from the paths, and renders it within a bitmap buffer. For ``array()`` and ``show()``, `numpy` and `matplotlib` must be installed. For ``image()``, `Pillow` is required. Each module is lazily loaded when the corresponding method is called. Args: glyphSet: a dictionary of drawable glyph objects keyed by name used to resolve component references in composite glyphs. 
:Examples: If `numpy` and `matplotlib` is available, the following code will show the glyph image of `fi` in a new window:: from fontTools.ttLib import TTFont from fontTools.pens.freetypePen import FreeTypePen from fontTools.misc.transform import Offset pen = FreeTypePen(None) font = TTFont('SourceSansPro-Regular.otf') glyph = font.getGlyphSet()['fi'] glyph.draw(pen) width, ascender, descender = glyph.width, font['OS/2'].usWinAscent, -font['OS/2'].usWinDescent height = ascender - descender pen.show(width=width, height=height, transform=Offset(0, -descender)) Combining with `uharfbuzz`, you can typeset a chunk of glyphs in a pen:: import uharfbuzz as hb from fontTools.pens.freetypePen import FreeTypePen from fontTools.pens.transformPen import TransformPen from fontTools.misc.transform import Offset en1, en2, ar, ja = 'Typesetting', 'Jeff', 'صف الحروف', 'たいぷせっと' for text, font_path, direction, typo_ascender, typo_descender, vhea_ascender, vhea_descender, contain, features in ( (en1, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, False, {"kern": True, "liga": True}), (en2, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, True, {"kern": True, "liga": True}), (ar, 'NotoSansArabic-Regular.ttf', 'rtl', 1374, -738, None, None, False, {"kern": True, "liga": True}), (ja, 'NotoSansJP-Regular.otf', 'ltr', 880, -120, 500, -500, False, {"palt": True, "kern": True}), (ja, 'NotoSansJP-Regular.otf', 'ttb', 880, -120, 500, -500, False, {"vert": True, "vpal": True, "vkrn": True}) ): blob = hb.Blob.from_file_path(font_path) face = hb.Face(blob) font = hb.Font(face) buf = hb.Buffer() buf.direction = direction buf.add_str(text) buf.guess_segment_properties() hb.shape(font, buf, features) x, y = 0, 0 pen = FreeTypePen(None) for info, pos in zip(buf.glyph_infos, buf.glyph_positions): gid = info.codepoint transformed = TransformPen(pen, Offset(x + pos.x_offset, y + pos.y_offset)) font.draw_glyph_with_pen(gid, transformed) x += pos.x_advance y += pos.y_advance offset, width, height = None, None, None if direction in ('ltr', 'rtl'): offset = (0, -typo_descender) width = x height = typo_ascender - typo_descender else: offset = (-vhea_descender, -y) width = vhea_ascender - vhea_descender height = -y pen.show(width=width, height=height, transform=Offset(*offset), contain=contain) For Jupyter Notebook, the rendered image will be displayed in a cell if you replace ``show()`` with ``image()`` in the examples. """ def __init__(self, glyphSet): BasePen.__init__(self, glyphSet) self.contours = [] def outline(self, transform=None, evenOdd=False): """Converts the current contours to ``FT_Outline``. Args: transform: An optional 6-tuple containing an affine transformation, or a ``Transform`` object from the ``fontTools.misc.transform`` module. evenOdd: Pass ``True`` for even-odd fill instead of non-zero. 
""" transform = transform or Transform() if not hasattr(transform, "transformPoint"): transform = Transform(*transform) n_contours = len(self.contours) n_points = sum((len(contour.points) for contour in self.contours)) points = [] for contour in self.contours: for point in contour.points: point = transform.transformPoint(point) points.append( FT_Vector( FT_Pos(otRound(point[0] * 64)), FT_Pos(otRound(point[1] * 64)) ) ) tags = [] for contour in self.contours: for tag in contour.tags: tags.append(tag) contours = [] contours_sum = 0 for contour in self.contours: contours_sum += len(contour.points) contours.append(contours_sum - 1) flags = FT_OUTLINE_EVEN_ODD_FILL if evenOdd else FT_OUTLINE_NONE return FT_Outline( (ctypes.c_short)(n_contours), (ctypes.c_short)(n_points), (FT_Vector * n_points)(*points), (ctypes.c_ubyte * n_points)(*tags), (ctypes.c_short * n_contours)(*contours), (ctypes.c_int)(flags), ) def buffer( self, width=None, height=None, transform=None, contain=False, evenOdd=False ): """Renders the current contours within a bitmap buffer. Args: width: Image width of the bitmap in pixels. If omitted, it automatically fits to the bounding box of the contours. height: Image height of the bitmap in pixels. If omitted, it automatically fits to the bounding box of the contours. transform: An optional 6-tuple containing an affine transformation, or a ``Transform`` object from the ``fontTools.misc.transform`` module. The bitmap size is not affected by this matrix. contain: If ``True``, the image size will be automatically expanded so that it fits to the bounding box of the paths. Useful for rendering glyphs with negative sidebearings without clipping. evenOdd: Pass ``True`` for even-odd fill instead of non-zero. Returns: A tuple of ``(buffer, size)``, where ``buffer`` is a ``bytes`` object of the resulted bitmap and ``size`` is a 2-tuple of its dimension. :Notes: The image size should always be given explicitly if you need to get a proper glyph image. When ``width`` and ``height`` are omitted, it forcifully fits to the bounding box and the side bearings get cropped. If you pass ``0`` to both ``width`` and ``height`` and set ``contain`` to ``True``, it expands to the bounding box while maintaining the origin of the contours, meaning that LSB will be maintained but RSB won’t. The difference between the two becomes more obvious when rotate or skew transformation is applied. :Example: .. 
code-block:: >> pen = FreeTypePen(None) >> glyph.draw(pen) >> buf, size = pen.buffer(width=500, height=1000) >> type(buf), len(buf), size (<class 'bytes'>, 500000, (500, 1000)) """ transform = transform or Transform() if not hasattr(transform, "transformPoint"): transform = Transform(*transform) contain_x, contain_y = contain or width is None, contain or height is None if contain_x or contain_y: dx, dy = transform.dx, transform.dy bbox = self.bbox p1, p2, p3, p4 = ( transform.transformPoint((bbox[0], bbox[1])), transform.transformPoint((bbox[2], bbox[1])), transform.transformPoint((bbox[0], bbox[3])), transform.transformPoint((bbox[2], bbox[3])), ) px, py = (p1[0], p2[0], p3[0], p4[0]), (p1[1], p2[1], p3[1], p4[1]) if contain_x: if width is None: dx = dx - min(*px) width = max(*px) - min(*px) else: dx = dx - min(min(*px), 0.0) width = max(width, max(*px) - min(min(*px), 0.0)) if contain_y: if height is None: dy = dy - min(*py) height = max(*py) - min(*py) else: dy = dy - min(min(*py), 0.0) height = max(height, max(*py) - min(min(*py), 0.0)) transform = Transform(*transform[:4], dx, dy) width, height = math.ceil(width), math.ceil(height) buf = ctypes.create_string_buffer(width * height) bitmap = FT_Bitmap( (ctypes.c_int)(height), (ctypes.c_int)(width), (ctypes.c_int)(width), (ctypes.POINTER(ctypes.c_ubyte))(buf), (ctypes.c_short)(256), (ctypes.c_ubyte)(FT_PIXEL_MODE_GRAY), (ctypes.c_char)(0), (ctypes.c_void_p)(None), ) outline = self.outline(transform=transform, evenOdd=evenOdd) err = FT_Outline_Get_Bitmap( freetype.get_handle(), ctypes.byref(outline), ctypes.byref(bitmap) ) if err != 0: raise FT_Exception(err) return buf.raw, (width, height) def array( self, width=None, height=None, transform=None, contain=False, evenOdd=False ): """Returns the rendered contours as a numpy array. Requires `numpy`. Args: width: Image width of the bitmap in pixels. If omitted, it automatically fits to the bounding box of the contours. height: Image height of the bitmap in pixels. If omitted, it automatically fits to the bounding box of the contours. transform: An optional 6-tuple containing an affine transformation, or a ``Transform`` object from the ``fontTools.misc.transform`` module. The bitmap size is not affected by this matrix. contain: If ``True``, the image size will be automatically expanded so that it fits to the bounding box of the paths. Useful for rendering glyphs with negative sidebearings without clipping. evenOdd: Pass ``True`` for even-odd fill instead of non-zero. Returns: A ``numpy.ndarray`` object with a shape of ``(height, width)``. Each element takes a value in the range of ``[0.0, 1.0]``. :Notes: The image size should always be given explicitly if you need to get a proper glyph image. When ``width`` and ``height`` are omitted, it forcibly fits to the bounding box and the side bearings get cropped. If you pass ``0`` to both ``width`` and ``height`` and set ``contain`` to ``True``, it expands to the bounding box while maintaining the origin of the contours, meaning that LSB will be maintained but RSB won’t. The difference between the two becomes more obvious when rotate or skew transformation is applied. :Example: ..
code-block:: >> pen = FreeTypePen(None) >> glyph.draw(pen) >> arr = pen.array(width=500, height=1000) >> type(arr), arr.shape (<class 'numpy.ndarray'>, (1000, 500)) """ import numpy as np buf, size = self.buffer( width=width, height=height, transform=transform, contain=contain, evenOdd=evenOdd, ) return np.frombuffer(buf, "B").reshape((size[1], size[0])) / 255.0 def show( self, width=None, height=None, transform=None, contain=False, evenOdd=False ): """Plots the rendered contours with `pyplot`. Requires `numpy` and `matplotlib`. Args: width: Image width of the bitmap in pixels. If omitted, it automatically fits to the bounding box of the contours. height: Image height of the bitmap in pixels. If omitted, it automatically fits to the bounding box of the contours. transform: An optional 6-tuple containing an affine transformation, or a ``Transform`` object from the ``fontTools.misc.transform`` module. The bitmap size is not affected by this matrix. contain: If ``True``, the image size will be automatically expanded so that it fits to the bounding box of the paths. Useful for rendering glyphs with negative sidebearings without clipping. evenOdd: Pass ``True`` for even-odd fill instead of non-zero. :Notes: The image size should always be given explicitly if you need to get a proper glyph image. When ``width`` and ``height`` are omitted, it forcibly fits to the bounding box and the side bearings get cropped. If you pass ``0`` to both ``width`` and ``height`` and set ``contain`` to ``True``, it expands to the bounding box while maintaining the origin of the contours, meaning that LSB will be maintained but RSB won’t. The difference between the two becomes more obvious when rotate or skew transformation is applied. :Example: .. code-block:: >> pen = FreeTypePen(None) >> glyph.draw(pen) >> pen.show(width=500, height=1000) """ from matplotlib import pyplot as plt a = self.array( width=width, height=height, transform=transform, contain=contain, evenOdd=evenOdd, ) plt.imshow(a, cmap="gray_r", vmin=0, vmax=1) plt.show() def image( self, width=None, height=None, transform=None, contain=False, evenOdd=False ): """Returns the rendered contours as a PIL image. Requires `Pillow`. Can be used to display a glyph image in Jupyter Notebook. Args: width: Image width of the bitmap in pixels. If omitted, it automatically fits to the bounding box of the contours. height: Image height of the bitmap in pixels. If omitted, it automatically fits to the bounding box of the contours. transform: An optional 6-tuple containing an affine transformation, or a ``Transform`` object from the ``fontTools.misc.transform`` module. The bitmap size is not affected by this matrix. contain: If ``True``, the image size will be automatically expanded so that it fits to the bounding box of the paths. Useful for rendering glyphs with negative sidebearings without clipping. evenOdd: Pass ``True`` for even-odd fill instead of non-zero. Returns: A ``PIL.Image.Image`` object. The image is filled in black with alpha channel obtained from the rendered bitmap. :Notes: The image size should always be given explicitly if you need to get a proper glyph image. When ``width`` and ``height`` are omitted, it forcibly fits to the bounding box and the side bearings get cropped. If you pass ``0`` to both ``width`` and ``height`` and set ``contain`` to ``True``, it expands to the bounding box while maintaining the origin of the contours, meaning that LSB will be maintained but RSB won’t.
The difference between the two becomes more obvious when rotate or skew transformation is applied. :Example: .. code-block:: >> pen = FreeTypePen(None) >> glyph.draw(pen) >> img = pen.image(width=500, height=1000) >> type(img), img.size (<class 'PIL.Image.Image'>, (500, 1000)) """ from PIL import Image buf, size = self.buffer( width=width, height=height, transform=transform, contain=contain, evenOdd=evenOdd, ) img = Image.new("L", size, 0) img.putalpha(Image.frombuffer("L", size, buf)) return img @property def bbox(self): """Computes the exact bounding box of an outline. Returns: A tuple of ``(xMin, yMin, xMax, yMax)``. """ bbox = FT_BBox() outline = self.outline() FT_Outline_Get_BBox(ctypes.byref(outline), ctypes.byref(bbox)) return (bbox.xMin / 64.0, bbox.yMin / 64.0, bbox.xMax / 64.0, bbox.yMax / 64.0) @property def cbox(self): """Returns an outline's ‘control box’. Returns: A tuple of ``(xMin, yMin, xMax, yMax)``. """ cbox = FT_BBox() outline = self.outline() FT_Outline_Get_CBox(ctypes.byref(outline), ctypes.byref(cbox)) return (cbox.xMin / 64.0, cbox.yMin / 64.0, cbox.xMax / 64.0, cbox.yMax / 64.0) def _moveTo(self, pt): contour = Contour([], []) self.contours.append(contour) contour.points.append(pt) contour.tags.append(FT_CURVE_TAG_ON) def _lineTo(self, pt): if not (self.contours and len(self.contours[-1].points) > 0): raise PenError("Contour missing required initial moveTo") contour = self.contours[-1] contour.points.append(pt) contour.tags.append(FT_CURVE_TAG_ON) def _curveToOne(self, p1, p2, p3): if not (self.contours and len(self.contours[-1].points) > 0): raise PenError("Contour missing required initial moveTo") t1, t2, t3 = FT_CURVE_TAG_CUBIC, FT_CURVE_TAG_CUBIC, FT_CURVE_TAG_ON contour = self.contours[-1] for p, t in ((p1, t1), (p2, t2), (p3, t3)): contour.points.append(p) contour.tags.append(t) def _qCurveToOne(self, p1, p2): if not (self.contours and len(self.contours[-1].points) > 0): raise PenError("Contour missing required initial moveTo") t1, t2 = FT_CURVE_TAG_CONIC, FT_CURVE_TAG_ON contour = self.contours[-1] for p, t in ((p1, t1), (p2, t2)): contour.points.append(p) contour.tags.append(t) PKaZZZ���� � fontTools/pens/hashPointPen.py# Modified from https://github.com/adobe-type-tools/psautohint/blob/08b346865710ed3c172f1eb581d6ef243b203f99/python/psautohint/ufoFont.py#L800-L838 import hashlib from fontTools.pens.basePen import MissingComponentError from fontTools.pens.pointPen import AbstractPointPen class HashPointPen(AbstractPointPen): """ This pen can be used to check if a glyph's contents (outlines plus components) have changed. Components are added as the original outline plus each composite's transformation. Example: You have some TrueType hinting code for a glyph which you want to compile. The hinting code specifies a hash value computed with HashPointPen that was valid for the glyph's outlines at the time the hinting code was written. Now you can calculate the hash for the glyph's current outlines to check if the outlines have changed, which would probably make the hinting code invalid. > glyph = ufo[name] > hash_pen = HashPointPen(glyph.width, ufo) > glyph.drawPoints(hash_pen) > ttdata = glyph.lib.get("public.truetype.instructions", None) > stored_hash = ttdata.get("id", None) # The hash is stored in the "id" key > if stored_hash is None or stored_hash != hash_pen.hash: > logger.error(f"Glyph hash mismatch, glyph '{name}' will have no instructions in font.") > else: > # The hash values are identical, the outline has not changed. > # Compile the hinting code ... 
> pass If you want to compare a glyph from a source format which supports floating point coordinates and transformations against a glyph from a format which has restrictions on the precision of floats, e.g. UFO vs. TTF, you must use an appropriate rounding function to make the values comparable. For TTF fonts with composites, this construct can be used to make the transform values conform to F2Dot14: > ttf_hash_pen = HashPointPen(ttf_glyph_width, ttFont.getGlyphSet()) > ttf_round_pen = RoundingPointPen(ttf_hash_pen, transformRoundFunc=partial(floatToFixedToFloat, precisionBits=14)) > ufo_hash_pen = HashPointPen(ufo_glyph.width, ufo) > ttf_glyph.drawPoints(ttf_round_pen, ttFont["glyf"]) > ufo_round_pen = RoundingPointPen(ufo_hash_pen, transformRoundFunc=partial(floatToFixedToFloat, precisionBits=14)) > ufo_glyph.drawPoints(ufo_round_pen) > assert ttf_hash_pen.hash == ufo_hash_pen.hash """ def __init__(self, glyphWidth=0, glyphSet=None): self.glyphset = glyphSet self.data = ["w%s" % round(glyphWidth, 9)] @property def hash(self): data = "".join(self.data) if len(data) >= 128: data = hashlib.sha512(data.encode("ascii")).hexdigest() return data def beginPath(self, identifier=None, **kwargs): pass def endPath(self): self.data.append("|") def addPoint( self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs, ): if segmentType is None: pt_type = "o" # offcurve else: pt_type = segmentType[0] self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}") def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs): tr = "".join([f"{t:+}" for t in transformation]) self.data.append("[") try: self.glyphset[baseGlyphName].drawPoints(self) except KeyError: raise MissingComponentError(baseGlyphName) self.data.append(f"({tr})]") PKaZZZ�^njUdUdfontTools/pens/momentsPen.pyfrom fontTools.pens.basePen import BasePen, OpenContourError try: import cython COMPILED = cython.compiled except (AttributeError, ImportError): # if cython not installed, use mock module with no-op decorators and types from fontTools.misc import cython COMPILED = False __all__ = ["MomentsPen"] class MomentsPen(BasePen): def __init__(self, glyphset=None): BasePen.__init__(self, glyphset) self.area = 0 self.momentX = 0 self.momentY = 0 self.momentXX = 0 self.momentXY = 0 self.momentYY = 0 def _moveTo(self, p0): self.__startPoint = p0 def _closePath(self): p0 = self._getCurrentPoint() if p0 != self.__startPoint: self._lineTo(self.__startPoint) def _endPath(self): p0 = self._getCurrentPoint() if p0 != self.__startPoint: raise OpenContourError("Glyph statistics not defined on open contours.") @cython.locals(r0=cython.double) @cython.locals(r1=cython.double) @cython.locals(r2=cython.double) @cython.locals(r3=cython.double) @cython.locals(r4=cython.double) @cython.locals(r5=cython.double) @cython.locals(r6=cython.double) @cython.locals(r7=cython.double) @cython.locals(r8=cython.double) @cython.locals(r9=cython.double) @cython.locals(r10=cython.double) @cython.locals(r11=cython.double) @cython.locals(r12=cython.double) @cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x1=cython.double, y1=cython.double) def _lineTo(self, p1): x0, y0 = self._getCurrentPoint() x1, y1 = p1 r0 = x1 * y0 r1 = x1 * y1 r2 = x1**2 r3 = r2 * y1 r4 = y0 - y1 r5 = r4 * x0 r6 = x0**2 r7 = 2 * y0 r8 = y0**2 r9 = y1**2 r10 = x1**3 r11 = y0**3 r12 = y1**3 self.area += -r0 / 2 - r1 / 2 + x0 * (y0 + y1) / 2 self.momentX += -r2 * y0 / 6 - r3 / 3 - r5 * x1 / 6 + r6 * (r7 + y1) / 6 self.momentY += ( -r0 * y1 / 6 - r8 * x1 / 6 - r9 * x1 / 
6 + x0 * (r8 + r9 + y0 * y1) / 6 ) self.momentXX += ( -r10 * y0 / 12 - r10 * y1 / 4 - r2 * r5 / 12 - r4 * r6 * x1 / 12 + x0**3 * (3 * y0 + y1) / 12 ) self.momentXY += ( -r2 * r8 / 24 - r2 * r9 / 8 - r3 * r7 / 24 + r6 * (r7 * y1 + 3 * r8 + r9) / 24 - x0 * x1 * (r8 - r9) / 12 ) self.momentYY += ( -r0 * r9 / 12 - r1 * r8 / 12 - r11 * x1 / 12 - r12 * x1 / 12 + x0 * (r11 + r12 + r8 * y1 + r9 * y0) / 12 ) @cython.locals(r0=cython.double) @cython.locals(r1=cython.double) @cython.locals(r2=cython.double) @cython.locals(r3=cython.double) @cython.locals(r4=cython.double) @cython.locals(r5=cython.double) @cython.locals(r6=cython.double) @cython.locals(r7=cython.double) @cython.locals(r8=cython.double) @cython.locals(r9=cython.double) @cython.locals(r10=cython.double) @cython.locals(r11=cython.double) @cython.locals(r12=cython.double) @cython.locals(r13=cython.double) @cython.locals(r14=cython.double) @cython.locals(r15=cython.double) @cython.locals(r16=cython.double) @cython.locals(r17=cython.double) @cython.locals(r18=cython.double) @cython.locals(r19=cython.double) @cython.locals(r20=cython.double) @cython.locals(r21=cython.double) @cython.locals(r22=cython.double) @cython.locals(r23=cython.double) @cython.locals(r24=cython.double) @cython.locals(r25=cython.double) @cython.locals(r26=cython.double) @cython.locals(r27=cython.double) @cython.locals(r28=cython.double) @cython.locals(r29=cython.double) @cython.locals(r30=cython.double) @cython.locals(r31=cython.double) @cython.locals(r32=cython.double) @cython.locals(r33=cython.double) @cython.locals(r34=cython.double) @cython.locals(r35=cython.double) @cython.locals(r36=cython.double) @cython.locals(r37=cython.double) @cython.locals(r38=cython.double) @cython.locals(r39=cython.double) @cython.locals(r40=cython.double) @cython.locals(r41=cython.double) @cython.locals(r42=cython.double) @cython.locals(r43=cython.double) @cython.locals(r44=cython.double) @cython.locals(r45=cython.double) @cython.locals(r46=cython.double) @cython.locals(r47=cython.double) @cython.locals(r48=cython.double) @cython.locals(r49=cython.double) @cython.locals(r50=cython.double) @cython.locals(r51=cython.double) @cython.locals(r52=cython.double) @cython.locals(r53=cython.double) @cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x1=cython.double, y1=cython.double) @cython.locals(x2=cython.double, y2=cython.double) def _qCurveToOne(self, p1, p2): x0, y0 = self._getCurrentPoint() x1, y1 = p1 x2, y2 = p2 r0 = 2 * y1 r1 = r0 * x2 r2 = x2 * y2 r3 = 3 * r2 r4 = 2 * x1 r5 = 3 * y0 r6 = x1**2 r7 = x2**2 r8 = 4 * y1 r9 = 10 * y2 r10 = 2 * y2 r11 = r4 * x2 r12 = x0**2 r13 = 10 * y0 r14 = r4 * y2 r15 = x2 * y0 r16 = 4 * x1 r17 = r0 * x1 + r2 r18 = r2 * r8 r19 = y1**2 r20 = 2 * r19 r21 = y2**2 r22 = r21 * x2 r23 = 5 * r22 r24 = y0**2 r25 = y0 * y2 r26 = 5 * r24 r27 = x1**3 r28 = x2**3 r29 = 30 * y1 r30 = 6 * y1 r31 = 10 * r7 * x1 r32 = 5 * y2 r33 = 12 * r6 r34 = 30 * x1 r35 = x1 * y1 r36 = r3 + 20 * r35 r37 = 12 * x1 r38 = 20 * r6 r39 = 8 * r6 * y1 r40 = r32 * r7 r41 = 60 * y1 r42 = 20 * r19 r43 = 4 * r19 r44 = 15 * r21 r45 = 12 * x2 r46 = 12 * y2 r47 = 6 * x1 r48 = 8 * r19 * x1 + r23 r49 = 8 * y1**3 r50 = y2**3 r51 = y0**3 r52 = 10 * y1 r53 = 12 * y1 self.area += ( -r1 / 6 - r3 / 6 + x0 * (r0 + r5 + y2) / 6 + x1 * y2 / 3 - y0 * (r4 + x2) / 6 ) self.momentX += ( -r11 * (-r10 + y1) / 30 + r12 * (r13 + r8 + y2) / 30 + r6 * y2 / 15 - r7 * r8 / 30 - r7 * r9 / 30 + x0 * (r14 - r15 - r16 * y0 + r17) / 30 - y0 * (r11 + 2 * r6 + r7) / 30 ) self.momentY += ( -r18 / 30 - r20 * x2 / 30 
- r23 / 30 - r24 * (r16 + x2) / 30 + x0 * (r0 * y2 + r20 + r21 + r25 + r26 + r8 * y0) / 30 + x1 * y2 * (r10 + y1) / 15 - y0 * (r1 + r17) / 30 ) self.momentXX += ( r12 * (r1 - 5 * r15 - r34 * y0 + r36 + r9 * x1) / 420 + 2 * r27 * y2 / 105 - r28 * r29 / 420 - r28 * y2 / 4 - r31 * (r0 - 3 * y2) / 420 - r6 * x2 * (r0 - r32) / 105 + x0**3 * (r30 + 21 * y0 + y2) / 84 - x0 * ( r0 * r7 + r15 * r37 - r2 * r37 - r33 * y2 + r38 * y0 - r39 - r40 + r5 * r7 ) / 420 - y0 * (8 * r27 + 5 * r28 + r31 + r33 * x2) / 420 ) self.momentXY += ( r12 * (r13 * y2 + 3 * r21 + 105 * r24 + r41 * y0 + r42 + r46 * y1) / 840 - r16 * x2 * (r43 - r44) / 840 - r21 * r7 / 8 - r24 * (r38 + r45 * x1 + 3 * r7) / 840 - r41 * r7 * y2 / 840 - r42 * r7 / 840 + r6 * y2 * (r32 + r8) / 210 + x0 * ( -r15 * r8 + r16 * r25 + r18 + r21 * r47 - r24 * r34 - r26 * x2 + r35 * r46 + r48 ) / 420 - y0 * (r16 * r2 + r30 * r7 + r35 * r45 + r39 + r40) / 420 ) self.momentYY += ( -r2 * r42 / 420 - r22 * r29 / 420 - r24 * (r14 + r36 + r52 * x2) / 420 - r49 * x2 / 420 - r50 * x2 / 12 - r51 * (r47 + x2) / 84 + x0 * ( r19 * r46 + r21 * r5 + r21 * r52 + r24 * r29 + r25 * r53 + r26 * y2 + r42 * y0 + r49 + 5 * r50 + 35 * r51 ) / 420 + x1 * y2 * (r43 + r44 + r9 * y1) / 210 - y0 * (r19 * r45 + r2 * r53 - r21 * r4 + r48) / 420 ) @cython.locals(r0=cython.double) @cython.locals(r1=cython.double) @cython.locals(r2=cython.double) @cython.locals(r3=cython.double) @cython.locals(r4=cython.double) @cython.locals(r5=cython.double) @cython.locals(r6=cython.double) @cython.locals(r7=cython.double) @cython.locals(r8=cython.double) @cython.locals(r9=cython.double) @cython.locals(r10=cython.double) @cython.locals(r11=cython.double) @cython.locals(r12=cython.double) @cython.locals(r13=cython.double) @cython.locals(r14=cython.double) @cython.locals(r15=cython.double) @cython.locals(r16=cython.double) @cython.locals(r17=cython.double) @cython.locals(r18=cython.double) @cython.locals(r19=cython.double) @cython.locals(r20=cython.double) @cython.locals(r21=cython.double) @cython.locals(r22=cython.double) @cython.locals(r23=cython.double) @cython.locals(r24=cython.double) @cython.locals(r25=cython.double) @cython.locals(r26=cython.double) @cython.locals(r27=cython.double) @cython.locals(r28=cython.double) @cython.locals(r29=cython.double) @cython.locals(r30=cython.double) @cython.locals(r31=cython.double) @cython.locals(r32=cython.double) @cython.locals(r33=cython.double) @cython.locals(r34=cython.double) @cython.locals(r35=cython.double) @cython.locals(r36=cython.double) @cython.locals(r37=cython.double) @cython.locals(r38=cython.double) @cython.locals(r39=cython.double) @cython.locals(r40=cython.double) @cython.locals(r41=cython.double) @cython.locals(r42=cython.double) @cython.locals(r43=cython.double) @cython.locals(r44=cython.double) @cython.locals(r45=cython.double) @cython.locals(r46=cython.double) @cython.locals(r47=cython.double) @cython.locals(r48=cython.double) @cython.locals(r49=cython.double) @cython.locals(r50=cython.double) @cython.locals(r51=cython.double) @cython.locals(r52=cython.double) @cython.locals(r53=cython.double) @cython.locals(r54=cython.double) @cython.locals(r55=cython.double) @cython.locals(r56=cython.double) @cython.locals(r57=cython.double) @cython.locals(r58=cython.double) @cython.locals(r59=cython.double) @cython.locals(r60=cython.double) @cython.locals(r61=cython.double) @cython.locals(r62=cython.double) @cython.locals(r63=cython.double) @cython.locals(r64=cython.double) @cython.locals(r65=cython.double) @cython.locals(r66=cython.double) 
@cython.locals(r67=cython.double) @cython.locals(r68=cython.double) @cython.locals(r69=cython.double) @cython.locals(r70=cython.double) @cython.locals(r71=cython.double) @cython.locals(r72=cython.double) @cython.locals(r73=cython.double) @cython.locals(r74=cython.double) @cython.locals(r75=cython.double) @cython.locals(r76=cython.double) @cython.locals(r77=cython.double) @cython.locals(r78=cython.double) @cython.locals(r79=cython.double) @cython.locals(r80=cython.double) @cython.locals(r81=cython.double) @cython.locals(r82=cython.double) @cython.locals(r83=cython.double) @cython.locals(r84=cython.double) @cython.locals(r85=cython.double) @cython.locals(r86=cython.double) @cython.locals(r87=cython.double) @cython.locals(r88=cython.double) @cython.locals(r89=cython.double) @cython.locals(r90=cython.double) @cython.locals(r91=cython.double) @cython.locals(r92=cython.double) @cython.locals(r93=cython.double) @cython.locals(r94=cython.double) @cython.locals(r95=cython.double) @cython.locals(r96=cython.double) @cython.locals(r97=cython.double) @cython.locals(r98=cython.double) @cython.locals(r99=cython.double) @cython.locals(r100=cython.double) @cython.locals(r101=cython.double) @cython.locals(r102=cython.double) @cython.locals(r103=cython.double) @cython.locals(r104=cython.double) @cython.locals(r105=cython.double) @cython.locals(r106=cython.double) @cython.locals(r107=cython.double) @cython.locals(r108=cython.double) @cython.locals(r109=cython.double) @cython.locals(r110=cython.double) @cython.locals(r111=cython.double) @cython.locals(r112=cython.double) @cython.locals(r113=cython.double) @cython.locals(r114=cython.double) @cython.locals(r115=cython.double) @cython.locals(r116=cython.double) @cython.locals(r117=cython.double) @cython.locals(r118=cython.double) @cython.locals(r119=cython.double) @cython.locals(r120=cython.double) @cython.locals(r121=cython.double) @cython.locals(r122=cython.double) @cython.locals(r123=cython.double) @cython.locals(r124=cython.double) @cython.locals(r125=cython.double) @cython.locals(r126=cython.double) @cython.locals(r127=cython.double) @cython.locals(r128=cython.double) @cython.locals(r129=cython.double) @cython.locals(r130=cython.double) @cython.locals(r131=cython.double) @cython.locals(r132=cython.double) @cython.locals(x0=cython.double, y0=cython.double) @cython.locals(x1=cython.double, y1=cython.double) @cython.locals(x2=cython.double, y2=cython.double) @cython.locals(x3=cython.double, y3=cython.double) def _curveToOne(self, p1, p2, p3): x0, y0 = self._getCurrentPoint() x1, y1 = p1 x2, y2 = p2 x3, y3 = p3 r0 = 6 * y2 r1 = r0 * x3 r2 = 10 * y3 r3 = r2 * x3 r4 = 3 * y1 r5 = 6 * x1 r6 = 3 * x2 r7 = 6 * y1 r8 = 3 * y2 r9 = x2**2 r10 = 45 * r9 r11 = r10 * y3 r12 = x3**2 r13 = r12 * y2 r14 = r12 * y3 r15 = 7 * y3 r16 = 15 * x3 r17 = r16 * x2 r18 = x1**2 r19 = 9 * r18 r20 = x0**2 r21 = 21 * y1 r22 = 9 * r9 r23 = r7 * x3 r24 = 9 * y2 r25 = r24 * x2 + r3 r26 = 9 * x2 r27 = x2 * y3 r28 = -r26 * y1 + 15 * r27 r29 = 3 * x1 r30 = 45 * x1 r31 = 12 * x3 r32 = 45 * r18 r33 = 5 * r12 r34 = r8 * x3 r35 = 105 * y0 r36 = 30 * y0 r37 = r36 * x2 r38 = 5 * x3 r39 = 15 * y3 r40 = 5 * y3 r41 = r40 * x3 r42 = x2 * y2 r43 = 18 * r42 r44 = 45 * y1 r45 = r41 + r43 + r44 * x1 r46 = y2 * y3 r47 = r46 * x3 r48 = y2**2 r49 = 45 * r48 r50 = r49 * x3 r51 = y3**2 r52 = r51 * x3 r53 = y1**2 r54 = 9 * r53 r55 = y0**2 r56 = 21 * x1 r57 = 6 * x2 r58 = r16 * y2 r59 = r39 * y2 r60 = 9 * r48 r61 = r6 * y3 r62 = 3 * y3 r63 = r36 * y2 r64 = y1 * y3 r65 = 45 * r53 r66 = 5 * r51 r67 = x2**3 r68 = x3**3 
r69 = 630 * y2 r70 = 126 * x3 r71 = x1**3 r72 = 126 * x2 r73 = 63 * r9 r74 = r73 * x3 r75 = r15 * x3 + 15 * r42 r76 = 630 * x1 r77 = 14 * x3 r78 = 21 * r27 r79 = 42 * x1 r80 = 42 * x2 r81 = x1 * y2 r82 = 63 * r42 r83 = x1 * y1 r84 = r41 + r82 + 378 * r83 r85 = x2 * x3 r86 = r85 * y1 r87 = r27 * x3 r88 = 27 * r9 r89 = r88 * y2 r90 = 42 * r14 r91 = 90 * x1 r92 = 189 * r18 r93 = 378 * r18 r94 = r12 * y1 r95 = 252 * x1 * x2 r96 = r79 * x3 r97 = 30 * r85 r98 = r83 * x3 r99 = 30 * x3 r100 = 42 * x3 r101 = r42 * x1 r102 = r10 * y2 + 14 * r14 + 126 * r18 * y1 + r81 * r99 r103 = 378 * r48 r104 = 18 * y1 r105 = r104 * y2 r106 = y0 * y1 r107 = 252 * y2 r108 = r107 * y0 r109 = y0 * y3 r110 = 42 * r64 r111 = 378 * r53 r112 = 63 * r48 r113 = 27 * x2 r114 = r27 * y2 r115 = r113 * r48 + 42 * r52 r116 = x3 * y3 r117 = 54 * r42 r118 = r51 * x1 r119 = r51 * x2 r120 = r48 * x1 r121 = 21 * x3 r122 = r64 * x1 r123 = r81 * y3 r124 = 30 * r27 * y1 + r49 * x2 + 14 * r52 + 126 * r53 * x1 r125 = y2**3 r126 = y3**3 r127 = y1**3 r128 = y0**3 r129 = r51 * y2 r130 = r112 * y3 + r21 * r51 r131 = 189 * r53 r132 = 90 * y2 self.area += ( -r1 / 20 - r3 / 20 - r4 * (x2 + x3) / 20 + x0 * (r7 + r8 + 10 * y0 + y3) / 20 + 3 * x1 * (y2 + y3) / 20 + 3 * x2 * y3 / 10 - y0 * (r5 + r6 + x3) / 20 ) self.momentX += ( r11 / 840 - r13 / 8 - r14 / 3 - r17 * (-r15 + r8) / 840 + r19 * (r8 + 2 * y3) / 840 + r20 * (r0 + r21 + 56 * y0 + y3) / 168 + r29 * (-r23 + r25 + r28) / 840 - r4 * (10 * r12 + r17 + r22) / 840 + x0 * ( 12 * r27 + r30 * y2 + r34 - r35 * x1 - r37 - r38 * y0 + r39 * x1 - r4 * x3 + r45 ) / 840 - y0 * (r17 + r30 * x2 + r31 * x1 + r32 + r33 + 18 * r9) / 840 ) self.momentY += ( -r4 * (r25 + r58) / 840 - r47 / 8 - r50 / 840 - r52 / 6 - r54 * (r6 + 2 * x3) / 840 - r55 * (r56 + r57 + x3) / 168 + x0 * ( r35 * y1 + r40 * y0 + r44 * y2 + 18 * r48 + 140 * r55 + r59 + r63 + 12 * r64 + r65 + r66 ) / 840 + x1 * (r24 * y1 + 10 * r51 + r59 + r60 + r7 * y3) / 280 + x2 * y3 * (r15 + r8) / 56 - y0 * (r16 * y1 + r31 * y2 + r44 * x2 + r45 + r61 - r62 * x1) / 840 ) self.momentXX += ( -r12 * r72 * (-r40 + r8) / 9240 + 3 * r18 * (r28 + r34 - r38 * y1 + r75) / 3080 + r20 * ( r24 * x3 - r72 * y0 - r76 * y0 - r77 * y0 + r78 + r79 * y3 + r80 * y1 + 210 * r81 + r84 ) / 9240 - r29 * ( r12 * r21 + 14 * r13 + r44 * r9 - r73 * y3 + 54 * r86 - 84 * r87 - r89 - r90 ) / 9240 - r4 * (70 * r12 * x2 + 27 * r67 + 42 * r68 + r74) / 9240 + 3 * r67 * y3 / 220 - r68 * r69 / 9240 - r68 * y3 / 4 - r70 * r9 * (-r62 + y2) / 9240 + 3 * r71 * (r24 + r40) / 3080 + x0**3 * (r24 + r44 + 165 * y0 + y3) / 660 + x0 * ( r100 * r27 + 162 * r101 + r102 + r11 + 63 * r18 * y3 + r27 * r91 - r33 * y0 - r37 * x3 + r43 * x3 - r73 * y0 - r88 * y1 + r92 * y2 - r93 * y0 - 9 * r94 - r95 * y0 - r96 * y0 - r97 * y1 - 18 * r98 + r99 * x1 * y3 ) / 9240 - y0 * ( r12 * r56 + r12 * r80 + r32 * x3 + 45 * r67 + 14 * r68 + 126 * r71 + r74 + r85 * r91 + 135 * r9 * x1 + r92 * x2 ) / 9240 ) self.momentXY += ( -r103 * r12 / 18480 - r12 * r51 / 8 - 3 * r14 * y2 / 44 + 3 * r18 * (r105 + r2 * y1 + 18 * r46 + 15 * r48 + 7 * r51) / 6160 + r20 * ( 1260 * r106 + r107 * y1 + r108 + 28 * r109 + r110 + r111 + r112 + 30 * r46 + 2310 * r55 + r66 ) / 18480 - r54 * (7 * r12 + 18 * r85 + 15 * r9) / 18480 - r55 * (r33 + r73 + r93 + r95 + r96 + r97) / 18480 - r7 * (42 * r13 + r82 * x3 + 28 * r87 + r89 + r90) / 18480 - 3 * r85 * (r48 - r66) / 220 + 3 * r9 * y3 * (r62 + 2 * y2) / 440 + x0 * ( -r1 * y0 - 84 * r106 * x2 + r109 * r56 + 54 * r114 + r117 * y1 + 15 * r118 + 21 * r119 + 81 * r120 + r121 * r46 + 54 * r122 + 60 * r123 
+ r124 - r21 * x3 * y0 + r23 * y3 - r54 * x3 - r55 * r72 - r55 * r76 - r55 * r77 + r57 * y0 * y3 + r60 * x3 + 84 * r81 * y0 + 189 * r81 * y1 ) / 9240 + x1 * ( r104 * r27 - r105 * x3 - r113 * r53 + 63 * r114 + r115 - r16 * r53 + 28 * r47 + r51 * r80 ) / 3080 - y0 * ( 54 * r101 + r102 + r116 * r5 + r117 * x3 + 21 * r13 - r19 * y3 + r22 * y3 + r78 * x3 + 189 * r83 * x2 + 60 * r86 + 81 * r9 * y1 + 15 * r94 + 54 * r98 ) / 9240 ) self.momentYY += ( -r103 * r116 / 9240 - r125 * r70 / 9240 - r126 * x3 / 12 - 3 * r127 * (r26 + r38) / 3080 - r128 * (r26 + r30 + x3) / 660 - r4 * (r112 * x3 + r115 - 14 * r119 + 84 * r47) / 9240 - r52 * r69 / 9240 - r54 * (r58 + r61 + r75) / 9240 - r55 * (r100 * y1 + r121 * y2 + r26 * y3 + r79 * y2 + r84 + 210 * x2 * y1) / 9240 + x0 * ( r108 * y1 + r110 * y0 + r111 * y0 + r112 * y0 + 45 * r125 + 14 * r126 + 126 * r127 + 770 * r128 + 42 * r129 + r130 + r131 * y2 + r132 * r64 + 135 * r48 * y1 + 630 * r55 * y1 + 126 * r55 * y2 + 14 * r55 * y3 + r63 * y3 + r65 * y3 + r66 * y0 ) / 9240 + x1 * ( 27 * r125 + 42 * r126 + 70 * r129 + r130 + r39 * r53 + r44 * r48 + 27 * r53 * y2 + 54 * r64 * y2 ) / 3080 + 3 * x2 * y3 * (r48 + r66 + r8 * y3) / 220 - y0 * ( r100 * r46 + 18 * r114 - 9 * r118 - 27 * r120 - 18 * r122 - 30 * r123 + r124 + r131 * x2 + r132 * x3 * y1 + 162 * r42 * y1 + r50 + 63 * r53 * x3 + r64 * r99 ) / 9240 ) if __name__ == "__main__": from fontTools.misc.symfont import x, y, printGreenPen printGreenPen( "MomentsPen", [ ("area", 1), ("momentX", x), ("momentY", y), ("momentXX", x**2), ("momentXY", x * y), ("momentYY", y**2), ], ) PKaZZZs],iifontTools/pens/perimeterPen.py# -*- coding: utf-8 -*- """Calculate the perimeter of a glyph.""" from fontTools.pens.basePen import BasePen from fontTools.misc.bezierTools import ( approximateQuadraticArcLengthC, calcQuadraticArcLengthC, approximateCubicArcLengthC, calcCubicArcLengthC, ) import math __all__ = ["PerimeterPen"] def _distance(p0, p1): return math.hypot(p0[0] - p1[0], p0[1] - p1[1]) class PerimeterPen(BasePen): def __init__(self, glyphset=None, tolerance=0.005): BasePen.__init__(self, glyphset) self.value = 0 self.tolerance = tolerance # Choose which algorithm to use for quadratic and for cubic. # Quadrature is faster but has fixed error characteristic with no strong # error bound. The cutoff points are derived empirically. 
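# (Here "quadrature" refers to the fixed-order Gaussian approximations # (approximate*ArcLengthC below); the calc*ArcLengthC alternatives are exact # for quadratics and subdivide cubics until the requested tolerance is met.)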
self._addCubic = ( self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive ) self._addQuadratic = ( self._addQuadraticQuadrature if tolerance >= 0.00075 else self._addQuadraticExact ) def _moveTo(self, p0): self.__startPoint = p0 def _closePath(self): p0 = self._getCurrentPoint() if p0 != self.__startPoint: self._lineTo(self.__startPoint) def _lineTo(self, p1): p0 = self._getCurrentPoint() self.value += _distance(p0, p1) def _addQuadraticExact(self, c0, c1, c2): self.value += calcQuadraticArcLengthC(c0, c1, c2) def _addQuadraticQuadrature(self, c0, c1, c2): self.value += approximateQuadraticArcLengthC(c0, c1, c2) def _qCurveToOne(self, p1, p2): p0 = self._getCurrentPoint() self._addQuadratic(complex(*p0), complex(*p1), complex(*p2)) def _addCubicRecursive(self, c0, c1, c2, c3): self.value += calcCubicArcLengthC(c0, c1, c2, c3, self.tolerance) def _addCubicQuadrature(self, c0, c1, c2, c3): self.value += approximateCubicArcLengthC(c0, c1, c2, c3) def _curveToOne(self, p1, p2, p3): p0 = self._getCurrentPoint() self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3)) PKaZZZ�%���� fontTools/pens/pointInsidePen.py"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing for shapes. """ from fontTools.pens.basePen import BasePen from fontTools.misc.bezierTools import solveQuadratic, solveCubic __all__ = ["PointInsidePen"] class PointInsidePen(BasePen): """This pen implements "point inside" testing: to test whether a given point lies inside the shape (black) or outside (white). Instances of this class can be recycled, as long as the setTestPoint() method is used to set the new point to test. Typical usage: pen = PointInsidePen(glyphSet, (100, 200)) outline.draw(pen) isInside = pen.getResult() Both the even-odd algorithm and the non-zero-winding-rule algorithm are implemented. The latter is the default, specify True for the evenOdd argument of __init__ or setTestPoint to use the even-odd algorithm. """ # This class implements the classical "shoot a ray from the test point # to infinity and count how many times it intersects the outline" (as well # as the non-zero variant, where the counter is incremented if the outline # intersects the ray in one direction and decremented if it intersects in # the other direction). # I found an amazingly clear explanation of the subtleties involved in # implementing this correctly for polygons here: # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html # I extended the principles outlined on that page to curves. def __init__(self, glyphSet, testPoint, evenOdd=False): BasePen.__init__(self, glyphSet) self.setTestPoint(testPoint, evenOdd) def setTestPoint(self, testPoint, evenOdd=False): """Set the point to test. Call this _before_ the outline gets drawn.""" self.testPoint = testPoint self.evenOdd = evenOdd self.firstPoint = None self.intersectionCount = 0 def getWinding(self): if self.firstPoint is not None: # always make sure the sub paths are closed; the algorithm only works # for closed paths. self.closePath() return self.intersectionCount def getResult(self): """After the shape has been drawn, getResult() returns True if the test point lies within the (black) shape, and False if it doesn't. 
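With ``evenOdd=True`` the parity of the ray/outline crossings decides insideness; otherwise the non-zero winding number does.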
""" winding = self.getWinding() if self.evenOdd: result = winding % 2 else: # non-zero result = self.intersectionCount != 0 return not not result def _addIntersection(self, goingUp): if self.evenOdd or goingUp: self.intersectionCount += 1 else: self.intersectionCount -= 1 def _moveTo(self, point): if self.firstPoint is not None: # always make sure the sub paths are closed; the algorithm only works # for closed paths. self.closePath() self.firstPoint = point def _lineTo(self, point): x, y = self.testPoint x1, y1 = self._getCurrentPoint() x2, y2 = point if x1 < x and x2 < x: return if y1 < y and y2 < y: return if y1 >= y and y2 >= y: return dx = x2 - x1 dy = y2 - y1 t = (y - y1) / dy ix = dx * t + x1 if ix < x: return self._addIntersection(y2 > y1) def _curveToOne(self, bcp1, bcp2, point): x, y = self.testPoint x1, y1 = self._getCurrentPoint() x2, y2 = bcp1 x3, y3 = bcp2 x4, y4 = point if x1 < x and x2 < x and x3 < x and x4 < x: return if y1 < y and y2 < y and y3 < y and y4 < y: return if y1 >= y and y2 >= y and y3 >= y and y4 >= y: return dy = y1 cy = (y2 - dy) * 3.0 by = (y3 - y2) * 3.0 - cy ay = y4 - dy - cy - by solutions = sorted(solveCubic(ay, by, cy, dy - y)) solutions = [t for t in solutions if -0.0 <= t <= 1.0] if not solutions: return dx = x1 cx = (x2 - dx) * 3.0 bx = (x3 - x2) * 3.0 - cx ax = x4 - dx - cx - bx above = y1 >= y lastT = None for t in solutions: if t == lastT: continue lastT = t t2 = t * t t3 = t2 * t direction = 3 * ay * t2 + 2 * by * t + cy incomingGoingUp = outgoingGoingUp = direction > 0.0 if direction == 0.0: direction = 6 * ay * t + 2 * by outgoingGoingUp = direction > 0.0 incomingGoingUp = not outgoingGoingUp if direction == 0.0: direction = ay incomingGoingUp = outgoingGoingUp = direction > 0.0 xt = ax * t3 + bx * t2 + cx * t + dx if xt < x: continue if t in (0.0, -0.0): if not outgoingGoingUp: self._addIntersection(outgoingGoingUp) elif t == 1.0: if incomingGoingUp: self._addIntersection(incomingGoingUp) else: if incomingGoingUp == outgoingGoingUp: self._addIntersection(outgoingGoingUp) # else: # we're not really intersecting, merely touching def _qCurveToOne_unfinished(self, bcp, point): # XXX need to finish this, for now doing it through a cubic # (BasePen implements _qCurveTo in terms of a cubic) will # have to do. x, y = self.testPoint x1, y1 = self._getCurrentPoint() x2, y2 = bcp x3, y3 = point c = y1 b = (y2 - c) * 2.0 a = y3 - c - b solutions = sorted(solveQuadratic(a, b, c - y)) solutions = [ t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON ] if not solutions: return # XXX def _closePath(self): if self._getCurrentPoint() != self.firstPoint: self.lineTo(self.firstPoint) self.firstPoint = None def _endPath(self): """Insideness is not defined for open contours.""" raise NotImplementedError PKaZZZ9k~WWfontTools/pens/pointPen.py""" ========= PointPens ========= Where **SegmentPens** have an intuitive approach to drawing (if you're familiar with postscript anyway), the **PointPen** is geared towards accessing all the data in the contours of the glyph. A PointPen has a very simple interface, it just steps through all the points in a call from glyph.drawPoints(). This allows the caller to provide more data for each point. For instance, whether or not a point is smooth, and its name. 
""" import math from typing import Any, Optional, Tuple, Dict from fontTools.misc.loggingTools import LogMixin from fontTools.pens.basePen import AbstractPen, MissingComponentError, PenError from fontTools.misc.transform import DecomposedTransform, Identity __all__ = [ "AbstractPointPen", "BasePointToSegmentPen", "PointToSegmentPen", "SegmentToPointPen", "GuessSmoothPointPen", "ReverseContourPointPen", ] class AbstractPointPen: """Baseclass for all PointPens.""" def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None: """Start a new sub path.""" raise NotImplementedError def endPath(self) -> None: """End the current sub path.""" raise NotImplementedError def addPoint( self, pt: Tuple[float, float], segmentType: Optional[str] = None, smooth: bool = False, name: Optional[str] = None, identifier: Optional[str] = None, **kwargs: Any, ) -> None: """Add a point to the current sub path.""" raise NotImplementedError def addComponent( self, baseGlyphName: str, transformation: Tuple[float, float, float, float, float, float], identifier: Optional[str] = None, **kwargs: Any, ) -> None: """Add a sub glyph.""" raise NotImplementedError def addVarComponent( self, glyphName: str, transformation: DecomposedTransform, location: Dict[str, float], identifier: Optional[str] = None, **kwargs: Any, ) -> None: """Add a VarComponent sub glyph. The 'transformation' argument must be a DecomposedTransform from the fontTools.misc.transform module, and the 'location' argument must be a dictionary mapping axis tags to their locations. """ # ttGlyphSet decomposes for us raise AttributeError class BasePointToSegmentPen(AbstractPointPen): """ Base class for retrieving the outline in a segment-oriented way. The PointPen protocol is simple yet also a little tricky, so when you need an outline presented as segments but you have as points, do use this base implementation as it properly takes care of all the edge cases. """ def __init__(self): self.currentPath = None def beginPath(self, identifier=None, **kwargs): if self.currentPath is not None: raise PenError("Path already begun.") self.currentPath = [] def _flushContour(self, segments): """Override this method. It will be called for each non-empty sub path with a list of segments: the 'segments' argument. The segments list contains tuples of length 2: (segmentType, points) segmentType is one of "move", "line", "curve" or "qcurve". "move" may only occur as the first segment, and it signifies an OPEN path. A CLOSED path does NOT start with a "move", in fact it will not contain a "move" at ALL. The 'points' field in the 2-tuple is a list of point info tuples. The list has 1 or more items, a point tuple has four items: (point, smooth, name, kwargs) 'point' is an (x, y) coordinate pair. For a closed path, the initial moveTo point is defined as the last point of the last segment. The 'points' list of "move" and "line" segments always contains exactly one point tuple. """ raise NotImplementedError def endPath(self): if self.currentPath is None: raise PenError("Path not begun.") points = self.currentPath self.currentPath = None if not points: return if len(points) == 1: # Not much more we can do than output a single move segment. pt, segmentType, smooth, name, kwargs = points[0] segments = [("move", [(pt, smooth, name, kwargs)])] self._flushContour(segments) return segments = [] if points[0][1] == "move": # It's an open contour, insert a "move" segment for the first # point and remove that first point from the point list. 
pt, segmentType, smooth, name, kwargs = points[0] segments.append(("move", [(pt, smooth, name, kwargs)])) points.pop(0) else: # It's a closed contour. Locate the first on-curve point, and # rotate the point list so that it _ends_ with an on-curve # point. firstOnCurve = None for i in range(len(points)): segmentType = points[i][1] if segmentType is not None: firstOnCurve = i break if firstOnCurve is None: # Special case for quadratics: a contour with no on-curve # points. Add a "None" point. (See also the Pen protocol's # qCurveTo() method and fontTools.pens.basePen.py.) points.append((None, "qcurve", None, None, None)) else: points = points[firstOnCurve + 1 :] + points[: firstOnCurve + 1] currentSegment = [] for pt, segmentType, smooth, name, kwargs in points: currentSegment.append((pt, smooth, name, kwargs)) if segmentType is None: continue segments.append((segmentType, currentSegment)) currentSegment = [] self._flushContour(segments) def addPoint( self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs ): if self.currentPath is None: raise PenError("Path not begun") self.currentPath.append((pt, segmentType, smooth, name, kwargs)) class PointToSegmentPen(BasePointToSegmentPen): """ Adapter class that converts the PointPen protocol to the (Segment)Pen protocol. NOTE: The segment pen does not support and will drop point names, identifiers and kwargs. """ def __init__(self, segmentPen, outputImpliedClosingLine=False): BasePointToSegmentPen.__init__(self) self.pen = segmentPen self.outputImpliedClosingLine = outputImpliedClosingLine def _flushContour(self, segments): if not segments: raise PenError("Must have at least one segment.") pen = self.pen if segments[0][0] == "move": # It's an open path. closed = False points = segments[0][1] if len(points) != 1: raise PenError(f"Illegal move segment point count: {len(points)}") movePt, _, _, _ = points[0] del segments[0] else: # It's a closed path, do a moveTo to the last # point of the last segment. closed = True segmentType, points = segments[-1] movePt, _, _, _ = points[-1] if movePt is None: # quad special case: a contour with no on-curve points contains # one "qcurve" segment that ends with a point that's None. We # must not output a moveTo() in that case. pass else: pen.moveTo(movePt) outputImpliedClosingLine = self.outputImpliedClosingLine nSegments = len(segments) lastPt = movePt for i in range(nSegments): segmentType, points = segments[i] points = [pt for pt, _, _, _ in points] if segmentType == "line": if len(points) != 1: raise PenError(f"Illegal line segment point count: {len(points)}") pt = points[0] # For closed contours, a 'lineTo' is always implied from the last oncurve # point to the starting point, thus we can omit it when the last and # starting point don't overlap. # However, when the last oncurve point is a "line" segment and has same # coordinates as the starting point of a closed contour, we need to output # the closing 'lineTo' explicitly (regardless of the value of the # 'outputImpliedClosingLine' option) in order to disambiguate this case from # the implied closing 'lineTo', otherwise the duplicate point would be lost. # See https://github.com/googlefonts/fontmake/issues/572. 
if ( i + 1 != nSegments or outputImpliedClosingLine or not closed or pt == lastPt ): pen.lineTo(pt) lastPt = pt elif segmentType == "curve": pen.curveTo(*points) lastPt = points[-1] elif segmentType == "qcurve": pen.qCurveTo(*points) lastPt = points[-1] else: raise PenError(f"Illegal segmentType: {segmentType}") if closed: pen.closePath() else: pen.endPath() def addComponent(self, glyphName, transform, identifier=None, **kwargs): del identifier # unused del kwargs # unused self.pen.addComponent(glyphName, transform) class SegmentToPointPen(AbstractPen): """ Adapter class that converts the (Segment)Pen protocol to the PointPen protocol. """ def __init__(self, pointPen, guessSmooth=True): if guessSmooth: self.pen = GuessSmoothPointPen(pointPen) else: self.pen = pointPen self.contour = None def _flushContour(self): pen = self.pen pen.beginPath() for pt, segmentType in self.contour: pen.addPoint(pt, segmentType=segmentType) pen.endPath() def moveTo(self, pt): self.contour = [] self.contour.append((pt, "move")) def lineTo(self, pt): if self.contour is None: raise PenError("Contour missing required initial moveTo") self.contour.append((pt, "line")) def curveTo(self, *pts): if not pts: raise TypeError("Must pass in at least one point") if self.contour is None: raise PenError("Contour missing required initial moveTo") for pt in pts[:-1]: self.contour.append((pt, None)) self.contour.append((pts[-1], "curve")) def qCurveTo(self, *pts): if not pts: raise TypeError("Must pass in at least one point") if pts[-1] is None: self.contour = [] else: if self.contour is None: raise PenError("Contour missing required initial moveTo") for pt in pts[:-1]: self.contour.append((pt, None)) if pts[-1] is not None: self.contour.append((pts[-1], "qcurve")) def closePath(self): if self.contour is None: raise PenError("Contour missing required initial moveTo") if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]: self.contour[0] = self.contour[-1] del self.contour[-1] else: # There's an implied line at the end, replace "move" with "line" # for the first point pt, tp = self.contour[0] if tp == "move": self.contour[0] = pt, "line" self._flushContour() self.contour = None def endPath(self): if self.contour is None: raise PenError("Contour missing required initial moveTo") self._flushContour() self.contour = None def addComponent(self, glyphName, transform): if self.contour is not None: raise PenError("Components must be added before or after contours") self.pen.addComponent(glyphName, transform) class GuessSmoothPointPen(AbstractPointPen): """ Filtering PointPen that tries to determine whether an on-curve point should be "smooth", ie. that it's a "tangent" point or a "curve" point. """ def __init__(self, outPen, error=0.05): self._outPen = outPen self._error = error self._points = None def _flushContour(self): if self._points is None: raise PenError("Path not begun") points = self._points nPoints = len(points) if not nPoints: return if points[0][1] == "move": # Open path. indices = range(1, nPoints - 1) elif nPoints > 1: # Closed path. To avoid having to mod the contour index, we # simply abuse Python's negative index feature, and start at -1 indices = range(-1, nPoints - 1) else: # closed path containing 1 point (!), ignore. 
indices = [] for i in indices: pt, segmentType, _, name, kwargs = points[i] if segmentType is None: continue prev = i - 1 next = i + 1 if points[prev][1] is not None and points[next][1] is not None: continue # At least one of our neighbors is an off-curve point pt = points[i][0] prevPt = points[prev][0] nextPt = points[next][0] if pt != prevPt and pt != nextPt: dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1] dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1] a1 = math.atan2(dy1, dx1) a2 = math.atan2(dy2, dx2) if abs(a1 - a2) < self._error: points[i] = pt, segmentType, True, name, kwargs for pt, segmentType, smooth, name, kwargs in points: self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs) def beginPath(self, identifier=None, **kwargs): if self._points is not None: raise PenError("Path already begun") self._points = [] if identifier is not None: kwargs["identifier"] = identifier self._outPen.beginPath(**kwargs) def endPath(self): self._flushContour() self._outPen.endPath() self._points = None def addPoint( self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs ): if self._points is None: raise PenError("Path not begun") if identifier is not None: kwargs["identifier"] = identifier self._points.append((pt, segmentType, False, name, kwargs)) def addComponent(self, glyphName, transformation, identifier=None, **kwargs): if self._points is not None: raise PenError("Components must be added before or after contours") if identifier is not None: kwargs["identifier"] = identifier self._outPen.addComponent(glyphName, transformation, **kwargs) def addVarComponent( self, glyphName, transformation, location, identifier=None, **kwargs ): if self._points is not None: raise PenError("VarComponents must be added before or after contours") if identifier is not None: kwargs["identifier"] = identifier self._outPen.addVarComponent(glyphName, transformation, location, **kwargs) class ReverseContourPointPen(AbstractPointPen): """ This is a PointPen that passes outline data to another PointPen, but reversing the winding direction of all contours. Components are simply passed through unchanged. Closed contours are reversed in such a way that the first point remains the first point. """ def __init__(self, outputPointPen): self.pen = outputPointPen # a place to store the points for the current sub path self.currentContour = None def _flushContour(self): pen = self.pen contour = self.currentContour if not contour: pen.beginPath(identifier=self.currentContourIdentifier) pen.endPath() return closed = contour[0][1] != "move" if not closed: lastSegmentType = "move" else: # Remove the first point and insert it at the end. When # the list of points gets reversed, this point will then # again be at the start. In other words, the following # will hold: # for N in range(len(originalContour)): # originalContour[N] == reversedContour[-N] contour.append(contour.pop(0)) # Find the first on-curve point. firstOnCurve = None for i in range(len(contour)): if contour[i][1] is not None: firstOnCurve = i break if firstOnCurve is None: # There are no on-curve points, we basically have to # do nothing but contour.reverse(). lastSegmentType = None else: lastSegmentType = contour[firstOnCurve][1] contour.reverse() if not closed: # Open paths must start with a move, so we simply dump # all off-curve points leading up to the first on-curve.
while contour[0][1] is None: contour.pop(0) pen.beginPath(identifier=self.currentContourIdentifier) for pt, nextSegmentType, smooth, name, kwargs in contour: if nextSegmentType is not None: segmentType = lastSegmentType lastSegmentType = nextSegmentType else: segmentType = None pen.addPoint( pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs ) pen.endPath() def beginPath(self, identifier=None, **kwargs): if self.currentContour is not None: raise PenError("Path already begun") self.currentContour = [] self.currentContourIdentifier = identifier self.onCurve = [] def endPath(self): if self.currentContour is None: raise PenError("Path not begun") self._flushContour() self.currentContour = None def addPoint( self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs ): if self.currentContour is None: raise PenError("Path not begun") if identifier is not None: kwargs["identifier"] = identifier self.currentContour.append((pt, segmentType, smooth, name, kwargs)) def addComponent(self, glyphName, transform, identifier=None, **kwargs): if self.currentContour is not None: raise PenError("Components must be added before or after contours") self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs) class DecomposingPointPen(LogMixin, AbstractPointPen): """Implements a 'addComponent' method that decomposes components (i.e. draws them onto self as simple contours). It can also be used as a mixin class (e.g. see DecomposingRecordingPointPen). You must override beginPath, addPoint, endPath. You may additionally override addVarComponent and addComponent. By default a warning message is logged when a base glyph is missing; set the class variable ``skipMissingComponents`` to False if you want all instances of a sub-class to raise a :class:`MissingComponentError` exception by default. """ skipMissingComponents = True # alias error for convenience MissingComponentError = MissingComponentError def __init__( self, glyphSet, *args, skipMissingComponents=None, reverseFlipped=False, **kwargs, ): """Takes a 'glyphSet' argument (dict), in which the glyphs that are referenced as components are looked up by their name. If the optional 'reverseFlipped' argument is True, components whose transformation matrix has a negative determinant will be decomposed with a reversed path direction to compensate for the flip. The optional 'skipMissingComponents' argument can be set to True/False to override the homonymous class attribute for a given pen instance. """ super().__init__(*args, **kwargs) self.glyphSet = glyphSet self.skipMissingComponents = ( self.__class__.skipMissingComponents if skipMissingComponents is None else skipMissingComponents ) self.reverseFlipped = reverseFlipped def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs): """Transform the points of the base glyph and draw it onto self. The `identifier` parameter and any extra kwargs are ignored. 
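For example (a sketch; the glyph names are hypothetical), the recording subclass from ``fontTools.pens.recordingPen`` shows the effect:

    from fontTools.pens.recordingPen import DecomposingRecordingPointPen

    pen = DecomposingRecordingPointPen(glyphSet)
    glyphSet["Aacute"].drawPoints(pen)
    # pen.value now holds beginPath/addPoint/endPath records for the
    # decomposed base outlines, and no addComponent records.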
""" from fontTools.pens.transformPen import TransformPointPen try: glyph = self.glyphSet[baseGlyphName] except KeyError: if not self.skipMissingComponents: raise MissingComponentError(baseGlyphName) self.log.warning( "glyph '%s' is missing from glyphSet; skipped" % baseGlyphName ) else: pen = self if transformation != Identity: pen = TransformPointPen(pen, transformation) if self.reverseFlipped: # if the transformation has a negative determinant, it will # reverse the contour direction of the component a, b, c, d = transformation[:4] det = a * d - b * c if a * d - b * c < 0: pen = ReverseContourPointPen(pen) glyph.drawPoints(pen) PKaZZZJ3�zzfontTools/pens/qtPen.pyfrom fontTools.pens.basePen import BasePen __all__ = ["QtPen"] class QtPen(BasePen): def __init__(self, glyphSet, path=None): BasePen.__init__(self, glyphSet) if path is None: from PyQt5.QtGui import QPainterPath path = QPainterPath() self.path = path def _moveTo(self, p): self.path.moveTo(*p) def _lineTo(self, p): self.path.lineTo(*p) def _curveToOne(self, p1, p2, p3): self.path.cubicTo(*p1, *p2, *p3) def _qCurveToOne(self, p1, p2): self.path.quadTo(*p1, *p2) def _closePath(self): self.path.closeSubpath() PKaZZZ�4)���fontTools/pens/qu2cuPen.py# Copyright 2016 Google Inc. All Rights Reserved. # Copyright 2023 Behdad Esfahbod. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from fontTools.qu2cu import quadratic_to_curves from fontTools.pens.filterPen import ContourFilterPen from fontTools.pens.reverseContourPen import ReverseContourPen import math class Qu2CuPen(ContourFilterPen): """A filter pen to convert quadratic bezier splines to cubic curves using the FontTools SegmentPen protocol. Args: other_pen: another SegmentPen used to draw the transformed outline. max_err: maximum approximation error in font units. For optimal results, if you know the UPEM of the font, we recommend setting this to a value equal, or close to UPEM / 1000. reverse_direction: flip the contours' direction but keep starting point. stats: a dictionary counting the point numbers of cubic segments. 
""" def __init__( self, other_pen, max_err, all_cubic=False, reverse_direction=False, stats=None, ): if reverse_direction: other_pen = ReverseContourPen(other_pen) super().__init__(other_pen) self.all_cubic = all_cubic self.max_err = max_err self.stats = stats def _quadratics_to_curve(self, q): curves = quadratic_to_curves(q, self.max_err, all_cubic=self.all_cubic) if self.stats is not None: for curve in curves: n = str(len(curve) - 2) self.stats[n] = self.stats.get(n, 0) + 1 for curve in curves: if len(curve) == 4: yield ("curveTo", curve[1:]) else: yield ("qCurveTo", curve[1:]) def filterContour(self, contour): quadratics = [] currentPt = None newContour = [] for op, args in contour: if op == "qCurveTo" and ( self.all_cubic or (len(args) > 2 and args[-1] is not None) ): if args[-1] is None: raise NotImplementedError( "oncurve-less contours with all_cubic not implemented" ) quadratics.append((currentPt,) + args) else: if quadratics: newContour.extend(self._quadratics_to_curve(quadratics)) quadratics = [] newContour.append((op, args)) currentPt = args[-1] if args else None if quadratics: newContour.extend(self._quadratics_to_curve(quadratics)) if not self.all_cubic: # Add back implicit oncurve points contour = newContour newContour = [] for op, args in contour: if op == "qCurveTo" and newContour and newContour[-1][0] == "qCurveTo": pt0 = newContour[-1][1][-2] pt1 = newContour[-1][1][-1] pt2 = args[0] if ( pt1 is not None and math.isclose(pt2[0] - pt1[0], pt1[0] - pt0[0]) and math.isclose(pt2[1] - pt1[1], pt1[1] - pt0[1]) ): newArgs = newContour[-1][1][:-1] + args newContour[-1] = (op, newArgs) continue newContour.append((op, args)) return newContour PKaZZZ"��fontTools/pens/quartzPen.pyfrom fontTools.pens.basePen import BasePen from Quartz.CoreGraphics import CGPathCreateMutable, CGPathMoveToPoint from Quartz.CoreGraphics import CGPathAddLineToPoint, CGPathAddCurveToPoint from Quartz.CoreGraphics import CGPathAddQuadCurveToPoint, CGPathCloseSubpath __all__ = ["QuartzPen"] class QuartzPen(BasePen): """A pen that creates a CGPath Parameters - path: an optional CGPath to add to - xform: an optional CGAffineTransform to apply to the path """ def __init__(self, glyphSet, path=None, xform=None): BasePen.__init__(self, glyphSet) if path is None: path = CGPathCreateMutable() self.path = path self.xform = xform def _moveTo(self, pt): x, y = pt CGPathMoveToPoint(self.path, self.xform, x, y) def _lineTo(self, pt): x, y = pt CGPathAddLineToPoint(self.path, self.xform, x, y) def _curveToOne(self, p1, p2, p3): (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3 CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3) def _qCurveToOne(self, p1, p2): (x1, y1), (x2, y2) = p1, p2 CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2) def _closePath(self): CGPathCloseSubpath(self.path) PKaZZZ�ө�.�.fontTools/pens/recordingPen.py"""Pen recording operations that can be accessed or replayed.""" from fontTools.pens.basePen import AbstractPen, DecomposingPen from fontTools.pens.pointPen import AbstractPointPen, DecomposingPointPen __all__ = [ "replayRecording", "RecordingPen", "DecomposingRecordingPen", "DecomposingRecordingPointPen", "RecordingPointPen", "lerpRecordings", ] def replayRecording(recording, pen): """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen, to a pen. Note that recording does not have to be produced by those pens. It can be any iterable of tuples of method name and tuple-of-arguments. Likewise, pen can be any objects receiving those method calls. 
""" for operator, operands in recording: getattr(pen, operator)(*operands) class RecordingPen(AbstractPen): """Pen recording operations that can be accessed or replayed. The recording can be accessed as pen.value; or replayed using pen.replay(otherPen). :Example: from fontTools.ttLib import TTFont from fontTools.pens.recordingPen import RecordingPen glyph_name = 'dollar' font_path = 'MyFont.otf' font = TTFont(font_path) glyphset = font.getGlyphSet() glyph = glyphset[glyph_name] pen = RecordingPen() glyph.draw(pen) print(pen.value) """ def __init__(self): self.value = [] def moveTo(self, p0): self.value.append(("moveTo", (p0,))) def lineTo(self, p1): self.value.append(("lineTo", (p1,))) def qCurveTo(self, *points): self.value.append(("qCurveTo", points)) def curveTo(self, *points): self.value.append(("curveTo", points)) def closePath(self): self.value.append(("closePath", ())) def endPath(self): self.value.append(("endPath", ())) def addComponent(self, glyphName, transformation): self.value.append(("addComponent", (glyphName, transformation))) def addVarComponent(self, glyphName, transformation, location): self.value.append(("addVarComponent", (glyphName, transformation, location))) def replay(self, pen): replayRecording(self.value, pen) draw = replay class DecomposingRecordingPen(DecomposingPen, RecordingPen): """Same as RecordingPen, except that it doesn't keep components as references, but draws them decomposed as regular contours. The constructor takes a required 'glyphSet' positional argument, a dictionary of glyph objects (i.e. with a 'draw' method) keyed by thir name; other arguments are forwarded to the DecomposingPen's constructor:: >>> class SimpleGlyph(object): ... def draw(self, pen): ... pen.moveTo((0, 0)) ... pen.curveTo((1, 1), (2, 2), (3, 3)) ... pen.closePath() >>> class CompositeGlyph(object): ... def draw(self, pen): ... pen.addComponent('a', (1, 0, 0, 1, -1, 1)) >>> class MissingComponent(object): ... def draw(self, pen): ... pen.addComponent('foobar', (1, 0, 0, 1, 0, 0)) >>> class FlippedComponent(object): ... def draw(self, pen): ... pen.addComponent('a', (-1, 0, 0, 1, 0, 0)) >>> glyphSet = { ... 'a': SimpleGlyph(), ... 'b': CompositeGlyph(), ... 'c': MissingComponent(), ... 'd': FlippedComponent(), ... } >>> for name, glyph in sorted(glyphSet.items()): ... pen = DecomposingRecordingPen(glyphSet) ... try: ... glyph.draw(pen) ... except pen.MissingComponentError: ... pass ... print("{}: {}".format(name, pen.value)) a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())] b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())] c: [] d: [('moveTo', ((0, 0),)), ('curveTo', ((-1, 1), (-2, 2), (-3, 3))), ('closePath', ())] >>> for name, glyph in sorted(glyphSet.items()): ... pen = DecomposingRecordingPen( ... glyphSet, skipMissingComponents=True, reverseFlipped=True, ... ) ... glyph.draw(pen) ... print("{}: {}".format(name, pen.value)) a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())] b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())] c: [] d: [('moveTo', ((0, 0),)), ('lineTo', ((-3, 3),)), ('curveTo', ((-2, 2), (-1, 1), (0, 0))), ('closePath', ())] """ # raises MissingComponentError(KeyError) if base glyph is not found in glyphSet skipMissingComponents = False class RecordingPointPen(AbstractPointPen): """PointPen recording operations that can be accessed or replayed. 
The recording can be accessed as pen.value; or replayed using pointPen.replay(otherPointPen). :Example: from defcon import Font from fontTools.pens.recordingPen import RecordingPointPen glyph_name = 'a' font_path = 'MyFont.ufo' font = Font(font_path) glyph = font[glyph_name] pen = RecordingPointPen() glyph.drawPoints(pen) print(pen.value) new_glyph = font.newGlyph('b') pen.replay(new_glyph.getPointPen()) """ def __init__(self): self.value = [] def beginPath(self, identifier=None, **kwargs): if identifier is not None: kwargs["identifier"] = identifier self.value.append(("beginPath", (), kwargs)) def endPath(self): self.value.append(("endPath", (), {})) def addPoint( self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs ): if identifier is not None: kwargs["identifier"] = identifier self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs)) def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs): if identifier is not None: kwargs["identifier"] = identifier self.value.append(("addComponent", (baseGlyphName, transformation), kwargs)) def addVarComponent( self, baseGlyphName, transformation, location, identifier=None, **kwargs ): if identifier is not None: kwargs["identifier"] = identifier self.value.append( ("addVarComponent", (baseGlyphName, transformation, location), kwargs) ) def replay(self, pointPen): for operator, args, kwargs in self.value: getattr(pointPen, operator)(*args, **kwargs) drawPoints = replay class DecomposingRecordingPointPen(DecomposingPointPen, RecordingPointPen): """Same as RecordingPointPen, except that it doesn't keep components as references, but draws them decomposed as regular contours. The constructor takes a required 'glyphSet' positional argument, a dictionary of pointPen-drawable glyph objects (i.e. with a 'drawPoints' method) keyed by thir name; other arguments are forwarded to the DecomposingPointPen's constructor:: >>> from pprint import pprint >>> class SimpleGlyph(object): ... def drawPoints(self, pen): ... pen.beginPath() ... pen.addPoint((0, 0), "line") ... pen.addPoint((1, 1)) ... pen.addPoint((2, 2)) ... pen.addPoint((3, 3), "curve") ... pen.endPath() >>> class CompositeGlyph(object): ... def drawPoints(self, pen): ... pen.addComponent('a', (1, 0, 0, 1, -1, 1)) >>> class MissingComponent(object): ... def drawPoints(self, pen): ... pen.addComponent('foobar', (1, 0, 0, 1, 0, 0)) >>> class FlippedComponent(object): ... def drawPoints(self, pen): ... pen.addComponent('a', (-1, 0, 0, 1, 0, 0)) >>> glyphSet = { ... 'a': SimpleGlyph(), ... 'b': CompositeGlyph(), ... 'c': MissingComponent(), ... 'd': FlippedComponent(), ... } >>> for name, glyph in sorted(glyphSet.items()): ... pen = DecomposingRecordingPointPen(glyphSet) ... try: ... glyph.drawPoints(pen) ... except pen.MissingComponentError: ... pass ... 
pprint({name: pen.value}) {'a': [('beginPath', (), {}), ('addPoint', ((0, 0), 'line', False, None), {}), ('addPoint', ((1, 1), None, False, None), {}), ('addPoint', ((2, 2), None, False, None), {}), ('addPoint', ((3, 3), 'curve', False, None), {}), ('endPath', (), {})]} {'b': [('beginPath', (), {}), ('addPoint', ((-1, 1), 'line', False, None), {}), ('addPoint', ((0, 2), None, False, None), {}), ('addPoint', ((1, 3), None, False, None), {}), ('addPoint', ((2, 4), 'curve', False, None), {}), ('endPath', (), {})]} {'c': []} {'d': [('beginPath', (), {}), ('addPoint', ((0, 0), 'line', False, None), {}), ('addPoint', ((-1, 1), None, False, None), {}), ('addPoint', ((-2, 2), None, False, None), {}), ('addPoint', ((-3, 3), 'curve', False, None), {}), ('endPath', (), {})]} >>> for name, glyph in sorted(glyphSet.items()): ... pen = DecomposingRecordingPointPen( ... glyphSet, skipMissingComponents=True, reverseFlipped=True, ... ) ... glyph.drawPoints(pen) ... pprint({name: pen.value}) {'a': [('beginPath', (), {}), ('addPoint', ((0, 0), 'line', False, None), {}), ('addPoint', ((1, 1), None, False, None), {}), ('addPoint', ((2, 2), None, False, None), {}), ('addPoint', ((3, 3), 'curve', False, None), {}), ('endPath', (), {})]} {'b': [('beginPath', (), {}), ('addPoint', ((-1, 1), 'line', False, None), {}), ('addPoint', ((0, 2), None, False, None), {}), ('addPoint', ((1, 3), None, False, None), {}), ('addPoint', ((2, 4), 'curve', False, None), {}), ('endPath', (), {})]} {'c': []} {'d': [('beginPath', (), {}), ('addPoint', ((0, 0), 'curve', False, None), {}), ('addPoint', ((-3, 3), 'line', False, None), {}), ('addPoint', ((-2, 2), None, False, None), {}), ('addPoint', ((-1, 1), None, False, None), {}), ('endPath', (), {})]} """ # raises MissingComponentError(KeyError) if base glyph is not found in glyphSet skipMissingComponents = False def lerpRecordings(recording1, recording2, factor=0.5): """Linearly interpolate between two recordings. The recordings must be decomposed, i.e. they must not contain any components. Factor is typically between 0 and 1. 0 means the first recording, 1 means the second recording, and 0.5 means the average of the two recordings. Other values are possible, and can be useful to extrapolate. Defaults to 0.5. Returns a generator with the new recording. 
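    Example sketch::

        r1 = [("moveTo", ((0, 0),)), ("lineTo", ((100, 0),)), ("closePath", ())]
        r2 = [("moveTo", ((0, 10),)), ("lineTo", ((200, 10),)), ("closePath", ())]
        list(lerpRecordings(r1, r2, 0.5))
        # [('moveTo', [(0.0, 5.0)]), ('lineTo', [(150.0, 5.0)]), ('closePath', [])]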
""" if len(recording1) != len(recording2): raise ValueError( "Mismatched lengths: %d and %d" % (len(recording1), len(recording2)) ) for (op1, args1), (op2, args2) in zip(recording1, recording2): if op1 != op2: raise ValueError("Mismatched operations: %s, %s" % (op1, op2)) if op1 == "addComponent": raise ValueError("Cannot interpolate components") else: mid_args = [ (x1 + (x2 - x1) * factor, y1 + (y2 - y1) * factor) for (x1, y1), (x2, y2) in zip(args1, args2) ] yield (op1, mid_args) if __name__ == "__main__": pen = RecordingPen() pen.moveTo((0, 0)) pen.lineTo((0, 100)) pen.curveTo((50, 75), (60, 50), (50, 25)) pen.closePath() from pprint import pprint pprint(pen.value) PKaZZZ� �/fontTools/pens/reportLabPen.pyfrom fontTools.pens.basePen import BasePen from reportlab.graphics.shapes import Path __all__ = ["ReportLabPen"] class ReportLabPen(BasePen): """A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object.""" def __init__(self, glyphSet, path=None): BasePen.__init__(self, glyphSet) if path is None: path = Path() self.path = path def _moveTo(self, p): (x, y) = p self.path.moveTo(x, y) def _lineTo(self, p): (x, y) = p self.path.lineTo(x, y) def _curveToOne(self, p1, p2, p3): (x1, y1) = p1 (x2, y2) = p2 (x3, y3) = p3 self.path.curveTo(x1, y1, x2, y2, x3, y3) def _closePath(self): self.path.closePath() if __name__ == "__main__": import sys if len(sys.argv) < 3: print( "Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]" ) print( " If no image file name is created, by default <glyphname>.png is created." ) print(" example: reportLabPen.py Arial.TTF R test.png") print( " (The file format will be PNG, regardless of the image file name supplied)" ) sys.exit(0) from fontTools.ttLib import TTFont from reportlab.lib import colors path = sys.argv[1] glyphName = sys.argv[2] if len(sys.argv) > 3: imageFile = sys.argv[3] else: imageFile = "%s.png" % glyphName font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font gs = font.getGlyphSet() pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5)) g = gs[glyphName] g.draw(pen) w, h = g.width, 1000 from reportlab.graphics import renderPM from reportlab.graphics.shapes import Group, Drawing, scale # Everything is wrapped in a group to allow transformations. g = Group(pen.path) g.translate(0, 200) g.scale(0.3, 0.3) d = Drawing(w, h) d.add(g) renderPM.drawToFile(d, imageFile, fmt="PNG") PKaZZZ���{��#fontTools/pens/reverseContourPen.pyfrom fontTools.misc.arrayTools import pairwise from fontTools.pens.filterPen import ContourFilterPen __all__ = ["reversedContour", "ReverseContourPen"] class ReverseContourPen(ContourFilterPen): """Filter pen that passes outline data to another pen, but reversing the winding direction of all contours. Components are simply passed through unchanged. Closed contours are reversed in such a way that the first point remains the first point. """ def __init__(self, outPen, outputImpliedClosingLine=False): super().__init__(outPen) self.outputImpliedClosingLine = outputImpliedClosingLine def filterContour(self, contour): return reversedContour(contour, self.outputImpliedClosingLine) def reversedContour(contour, outputImpliedClosingLine=False): """Generator that takes a list of pen's (operator, operands) tuples, and yields them with the winding direction reversed. 
""" if not contour: return # nothing to do, stop iteration # valid contours must have at least a starting and ending command, # can't have one without the other assert len(contour) > 1, "invalid contour" # the type of the last command determines if the contour is closed contourType = contour.pop()[0] assert contourType in ("endPath", "closePath") closed = contourType == "closePath" firstType, firstPts = contour.pop(0) assert firstType in ("moveTo", "qCurveTo"), ( "invalid initial segment type: %r" % firstType ) firstOnCurve = firstPts[-1] if firstType == "qCurveTo": # special case for TrueType paths contaning only off-curve points assert firstOnCurve is None, "off-curve only paths must end with 'None'" assert not contour, "only one qCurveTo allowed per off-curve path" firstPts = (firstPts[0],) + tuple(reversed(firstPts[1:-1])) + (None,) if not contour: # contour contains only one segment, nothing to reverse if firstType == "moveTo": closed = False # single-point paths can't be closed else: closed = True # off-curve paths are closed by definition yield firstType, firstPts else: lastType, lastPts = contour[-1] lastOnCurve = lastPts[-1] if closed: # for closed paths, we keep the starting point yield firstType, firstPts if firstOnCurve != lastOnCurve: # emit an implied line between the last and first points yield "lineTo", (lastOnCurve,) contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,)) if len(contour) > 1: secondType, secondPts = contour[0] else: # contour has only two points, the second and last are the same secondType, secondPts = lastType, lastPts if not outputImpliedClosingLine: # if a lineTo follows the initial moveTo, after reversing it # will be implied by the closePath, so we don't emit one; # unless the lineTo and moveTo overlap, in which case we keep the # duplicate points if secondType == "lineTo" and firstPts != secondPts: del contour[0] if contour: contour[-1] = (lastType, tuple(lastPts[:-1]) + secondPts) else: # for open paths, the last point will become the first yield firstType, (lastOnCurve,) contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,)) # we iterate over all segment pairs in reverse order, and yield # each one with the off-curve points reversed (if any), and # with the on-curve point of the following segment for (curType, curPts), (_, nextPts) in pairwise(contour, reverse=True): yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],) yield "closePath" if closed else "endPath", () PKaZZZ��K�))fontTools/pens/roundingPen.pyfrom fontTools.misc.roundTools import noRound, otRound from fontTools.misc.transform import Transform from fontTools.pens.filterPen import FilterPen, FilterPointPen __all__ = ["RoundingPen", "RoundingPointPen"] class RoundingPen(FilterPen): """ Filter pen that rounds point coordinates and component XY offsets to integer. For rounding the component transform values, a separate round function can be passed to the pen. >>> from fontTools.pens.recordingPen import RecordingPen >>> recpen = RecordingPen() >>> roundpen = RoundingPen(recpen) >>> roundpen.moveTo((0.4, 0.6)) >>> roundpen.lineTo((1.6, 2.5)) >>> roundpen.qCurveTo((2.4, 4.6), (3.3, 5.7), (4.9, 6.1)) >>> roundpen.curveTo((6.4, 8.6), (7.3, 9.7), (8.9, 10.1)) >>> roundpen.addComponent("a", (1.5, 0, 0, 1.5, 10.5, -10.5)) >>> recpen.value == [ ... ('moveTo', ((0, 1),)), ... ('lineTo', ((2, 3),)), ... ('qCurveTo', ((2, 5), (3, 6), (5, 6))), ... ('curveTo', ((6, 9), (7, 10), (9, 10))), ... ('addComponent', ('a', (1.5, 0, 0, 1.5, 11, -10))), ... 
] True """ def __init__(self, outPen, roundFunc=otRound, transformRoundFunc=noRound): super().__init__(outPen) self.roundFunc = roundFunc self.transformRoundFunc = transformRoundFunc def moveTo(self, pt): self._outPen.moveTo((self.roundFunc(pt[0]), self.roundFunc(pt[1]))) def lineTo(self, pt): self._outPen.lineTo((self.roundFunc(pt[0]), self.roundFunc(pt[1]))) def curveTo(self, *points): self._outPen.curveTo( *((self.roundFunc(x), self.roundFunc(y)) for x, y in points) ) def qCurveTo(self, *points): self._outPen.qCurveTo( *((self.roundFunc(x), self.roundFunc(y)) for x, y in points) ) def addComponent(self, glyphName, transformation): xx, xy, yx, yy, dx, dy = transformation self._outPen.addComponent( glyphName, Transform( self.transformRoundFunc(xx), self.transformRoundFunc(xy), self.transformRoundFunc(yx), self.transformRoundFunc(yy), self.roundFunc(dx), self.roundFunc(dy), ), ) class RoundingPointPen(FilterPointPen): """ Filter point pen that rounds point coordinates and component XY offsets to integer. For rounding the component scale values, a separate round function can be passed to the pen. >>> from fontTools.pens.recordingPen import RecordingPointPen >>> recpen = RecordingPointPen() >>> roundpen = RoundingPointPen(recpen) >>> roundpen.beginPath() >>> roundpen.addPoint((0.4, 0.6), 'line') >>> roundpen.addPoint((1.6, 2.5), 'line') >>> roundpen.addPoint((2.4, 4.6)) >>> roundpen.addPoint((3.3, 5.7)) >>> roundpen.addPoint((4.9, 6.1), 'qcurve') >>> roundpen.endPath() >>> roundpen.addComponent("a", (1.5, 0, 0, 1.5, 10.5, -10.5)) >>> recpen.value == [ ... ('beginPath', (), {}), ... ('addPoint', ((0, 1), 'line', False, None), {}), ... ('addPoint', ((2, 3), 'line', False, None), {}), ... ('addPoint', ((2, 5), None, False, None), {}), ... ('addPoint', ((3, 6), None, False, None), {}), ... ('addPoint', ((5, 6), 'qcurve', False, None), {}), ... ('endPath', (), {}), ... ('addComponent', ('a', (1.5, 0, 0, 1.5, 11, -10)), {}), ... 
] True """ def __init__(self, outPen, roundFunc=otRound, transformRoundFunc=noRound): super().__init__(outPen) self.roundFunc = roundFunc self.transformRoundFunc = transformRoundFunc def addPoint( self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs ): self._outPen.addPoint( (self.roundFunc(pt[0]), self.roundFunc(pt[1])), segmentType=segmentType, smooth=smooth, name=name, identifier=identifier, **kwargs, ) def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs): xx, xy, yx, yy, dx, dy = transformation self._outPen.addComponent( baseGlyphName=baseGlyphName, transformation=Transform( self.transformRoundFunc(xx), self.transformRoundFunc(xy), self.transformRoundFunc(yx), self.transformRoundFunc(yy), self.roundFunc(dx), self.roundFunc(dy), ), identifier=identifier, **kwargs, ) PKaZZZOcĩ%�%fontTools/pens/statisticsPen.py"""Pen calculating area, center of mass, variance and standard-deviation, covariance and correlation, and slant, of glyph shapes.""" from math import sqrt, degrees, atan from fontTools.pens.basePen import BasePen, OpenContourError from fontTools.pens.momentsPen import MomentsPen __all__ = ["StatisticsPen", "StatisticsControlPen"] class StatisticsBase: def __init__(self): self._zero() def _zero(self): self.area = 0 self.meanX = 0 self.meanY = 0 self.varianceX = 0 self.varianceY = 0 self.stddevX = 0 self.stddevY = 0 self.covariance = 0 self.correlation = 0 self.slant = 0 def _update(self): # XXX The variance formulas should never produce a negative value, # but due to reasons I don't understand, both of our pens do. # So we take the absolute value here. self.varianceX = abs(self.varianceX) self.varianceY = abs(self.varianceY) self.stddevX = stddevX = sqrt(self.varianceX) self.stddevY = stddevY = sqrt(self.varianceY) # Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) ) # https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient if stddevX * stddevY == 0: correlation = float("NaN") else: # XXX The above formula should never produce a value outside # the range [-1, 1], but due to reasons I don't understand, # (probably the same issue as above), it does. So we clamp. correlation = self.covariance / (stddevX * stddevY) correlation = max(-1, min(1, correlation)) self.correlation = correlation if abs(correlation) > 1e-3 else 0 slant = ( self.covariance / self.varianceY if self.varianceY != 0 else float("NaN") ) self.slant = slant if abs(slant) > 1e-3 else 0 class StatisticsPen(StatisticsBase, MomentsPen): """Pen calculating area, center of mass, variance and standard-deviation, covariance and correlation, and slant, of glyph shapes. Note that if the glyph shape is self-intersecting, the values are not correct (but well-defined). 
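    For example (a sketch; the Green-theorem integrals are exact for
    straight-line contours, so the area below is exact)::

        from fontTools.pens.statisticsPen import StatisticsPen

        pen = StatisticsPen()
        pen.moveTo((0, 0))
        pen.lineTo((0, 100))
        pen.lineTo((100, 100))
        pen.lineTo((100, 0))
        pen.closePath()
        # abs(pen.area) == 10000.0; the sign tracks contour direction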
    Moreover, area will be negative if contour directions are clockwise."""

    def __init__(self, glyphset=None):
        MomentsPen.__init__(self, glyphset=glyphset)
        StatisticsBase.__init__(self)

    def _closePath(self):
        MomentsPen._closePath(self)
        self._update()

    def _update(self):
        area = self.area
        if not area:
            self._zero()
            return

        # Center of mass
        # https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume
        self.meanX = meanX = self.momentX / area
        self.meanY = meanY = self.momentY / area

        # Var(X) = E[X^2] - E[X]^2
        self.varianceX = self.momentXX / area - meanX * meanX
        self.varianceY = self.momentYY / area - meanY * meanY

        # Covariance(X,Y) = (E[X.Y] - E[X]E[Y])
        self.covariance = self.momentXY / area - meanX * meanY

        StatisticsBase._update(self)


class StatisticsControlPen(StatisticsBase, BasePen):
    """Pen calculating area, center of mass, variance and
    standard-deviation, covariance and correlation, and slant,
    of glyph shapes, using the control polygon only.

    Note that if the glyph shape is self-intersecting, the values
    are not correct (but well-defined). Moreover, area will be
    negative if contour directions are clockwise."""

    def __init__(self, glyphset=None):
        BasePen.__init__(self, glyphset)
        StatisticsBase.__init__(self)
        self._nodes = []

    def _moveTo(self, pt):
        # remember the contour's start point so _endPath can detect
        # open contours
        self.__startPoint = pt
        self._nodes.append(complex(*pt))

    def _lineTo(self, pt):
        self._nodes.append(complex(*pt))

    def _qCurveToOne(self, pt1, pt2):
        for pt in (pt1, pt2):
            self._nodes.append(complex(*pt))

    def _curveToOne(self, pt1, pt2, pt3):
        for pt in (pt1, pt2, pt3):
            self._nodes.append(complex(*pt))

    def _closePath(self):
        self._update()

    def _endPath(self):
        p0 = self._getCurrentPoint()
        if p0 != self.__startPoint:
            raise OpenContourError("Glyph statistics not defined on open contours.")

    def _update(self):
        nodes = self._nodes
        n = len(nodes)

        # Triangle formula
        self.area = (
            sum(
                (p0.real * p1.imag - p1.real * p0.imag)
                for p0, p1 in zip(nodes, nodes[1:] + nodes[:1])
            )
            / 2
        )

        # Center of mass
        # https://en.wikipedia.org/wiki/Center_of_mass#A_system_of_particles
        sumNodes = sum(nodes)
        self.meanX = meanX = sumNodes.real / n
        self.meanY = meanY = sumNodes.imag / n

        if n > 1:
            # Var(X) = (sum[X^2] - sum[X]^2 / n) / (n - 1)
            # https://www.statisticshowto.com/probability-and-statistics/descriptive-statistics/sample-variance/
            self.varianceX = varianceX = (
                sum(p.real * p.real for p in nodes)
                - (sumNodes.real * sumNodes.real) / n
            ) / (n - 1)
            self.varianceY = varianceY = (
                sum(p.imag * p.imag for p in nodes)
                - (sumNodes.imag * sumNodes.imag) / n
            ) / (n - 1)

            # Covariance(X,Y) = (sum[X.Y] - sum[X].sum[Y] / n) / (n - 1)
            self.covariance = covariance = (
                sum(p.real * p.imag for p in nodes)
                - (sumNodes.real * sumNodes.imag) / n
            ) / (n - 1)
        else:
            self.varianceX = varianceX = 0
            self.varianceY = varianceY = 0
            self.covariance = covariance = 0

        StatisticsBase._update(self)


def _test(glyphset, upem, glyphs, quiet=False, *, control=False):
    from fontTools.pens.transformPen import TransformPen
    from fontTools.misc.transform import Scale

    wght_sum = 0
    wght_sum_perceptual = 0
    wdth_sum = 0
    slnt_sum = 0
    slnt_sum_perceptual = 0
    for glyph_name in glyphs:
        glyph = glyphset[glyph_name]
        if control:
            pen = StatisticsControlPen(glyphset=glyphset)
        else:
            pen = StatisticsPen(glyphset=glyphset)
        transformer = TransformPen(pen, Scale(1.0 / upem))
        glyph.draw(transformer)

        area = abs(pen.area)
        width = glyph.width
        wght_sum += area
        wght_sum_perceptual += pen.area * width
        wdth_sum += width
        slnt_sum += pen.slant
        slnt_sum_perceptual += pen.slant * width

        if quiet:
            continue

        print()
        print("glyph:", glyph_name)

        for item in [
            "area",
            "momentX",
"momentY", "momentXX", "momentYY", "momentXY", "meanX", "meanY", "varianceX", "varianceY", "stddevX", "stddevY", "covariance", "correlation", "slant", ]: print("%s: %g" % (item, getattr(pen, item))) if not quiet: print() print("font:") print("weight: %g" % (wght_sum * upem / wdth_sum)) print("weight (perceptual): %g" % (wght_sum_perceptual / wdth_sum)) print("width: %g" % (wdth_sum / upem / len(glyphs))) slant = slnt_sum / len(glyphs) print("slant: %g" % slant) print("slant angle: %g" % -degrees(atan(slant))) slant_perceptual = slnt_sum_perceptual / wdth_sum print("slant (perceptual): %g" % slant_perceptual) print("slant (perceptual) angle: %g" % -degrees(atan(slant_perceptual))) def main(args): """Report font glyph shape geometricsl statistics""" if args is None: import sys args = sys.argv[1:] import argparse parser = argparse.ArgumentParser( "fonttools pens.statisticsPen", description="Report font glyph shape geometricsl statistics", ) parser.add_argument("font", metavar="font.ttf", help="Font file.") parser.add_argument("glyphs", metavar="glyph-name", help="Glyph names.", nargs="*") parser.add_argument( "-y", metavar="<number>", help="Face index into a collection to open. Zero based.", ) parser.add_argument( "-c", "--control", action="store_true", help="Use the control-box pen instead of the Green therem.", ) parser.add_argument( "-q", "--quiet", action="store_true", help="Only report font-wide statistics." ) parser.add_argument( "--variations", metavar="AXIS=LOC", default="", help="List of space separated locations. A location consist in " "the name of a variation axis, followed by '=' and a number. E.g.: " "wght=700 wdth=80. The default is the location of the base master.", ) options = parser.parse_args(args) glyphs = options.glyphs fontNumber = int(options.y) if options.y is not None else 0 location = {} for tag_v in options.variations.split(): fields = tag_v.split("=") tag = fields[0].strip() v = int(fields[1]) location[tag] = v from fontTools.ttLib import TTFont font = TTFont(options.font, fontNumber=fontNumber) if not glyphs: glyphs = font.getGlyphOrder() _test( font.getGlyphSet(location=location), font["head"].unitsPerEm, glyphs, quiet=options.quiet, control=options.control, ) if __name__ == "__main__": import sys main(sys.argv[1:]) PKaZZZ��h(!(!fontTools/pens/svgPathPen.pyfrom typing import Callable from fontTools.pens.basePen import BasePen def pointToString(pt, ntos=str): return " ".join(ntos(i) for i in pt) class SVGPathPen(BasePen): """Pen to draw SVG path d commands. Example:: >>> pen = SVGPathPen(None) >>> pen.moveTo((0, 0)) >>> pen.lineTo((1, 1)) >>> pen.curveTo((2, 2), (3, 3), (4, 4)) >>> pen.closePath() >>> pen.getCommands() 'M0 0 1 1C2 2 3 3 4 4Z' Args: glyphSet: a dictionary of drawable glyph objects keyed by name used to resolve component references in composite glyphs. ntos: a callable that takes a number and returns a string, to customize how numbers are formatted (default: str). Note: Fonts have a coordinate system where Y grows up, whereas in SVG, Y grows down. As such, rendering path data from this pen in SVG typically results in upside-down glyphs. You can fix this by wrapping the data from this pen in an SVG group element with transform, or wrap this pen in a transform pen. 
        For example::

            from fontTools.pens.transformPen import TransformPen

            spen = svgPathPen.SVGPathPen(glyphset)
            pen = TransformPen(spen, (1, 0, 0, -1, 0, 0))
            glyphset[glyphname].draw(pen)
            print(spen.getCommands())
    """

    def __init__(self, glyphSet, ntos: Callable[[float], str] = str):
        BasePen.__init__(self, glyphSet)
        self._commands = []
        self._lastCommand = None
        self._lastX = None
        self._lastY = None
        self._ntos = ntos

    def _handleAnchor(self):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((0, 0))
        >>> pen.moveTo((10, 10))
        >>> pen._commands
        ['M10 10']
        """
        if self._lastCommand == "M":
            self._commands.pop(-1)

    def _moveTo(self, pt):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((0, 0))
        >>> pen._commands
        ['M0 0']

        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((10, 0))
        >>> pen._commands
        ['M10 0']

        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((0, 10))
        >>> pen._commands
        ['M0 10']
        """
        self._handleAnchor()
        t = "M%s" % (pointToString(pt, self._ntos))
        self._commands.append(t)
        self._lastCommand = "M"
        self._lastX, self._lastY = pt

    def _lineTo(self, pt):
        """
        # duplicate point
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((10, 10))
        >>> pen.lineTo((10, 10))
        >>> pen._commands
        ['M10 10']

        # vertical line
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((10, 10))
        >>> pen.lineTo((10, 0))
        >>> pen._commands
        ['M10 10', 'V0']

        # horizontal line
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((10, 10))
        >>> pen.lineTo((0, 10))
        >>> pen._commands
        ['M10 10', 'H0']

        # basic
        >>> pen = SVGPathPen(None)
        >>> pen.lineTo((70, 80))
        >>> pen._commands
        ['L70 80']

        # basic following a moveto
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((0, 0))
        >>> pen.lineTo((10, 10))
        >>> pen._commands
        ['M0 0', ' 10 10']
        """
        x, y = pt
        # duplicate point
        if x == self._lastX and y == self._lastY:
            return
        # vertical line
        elif x == self._lastX:
            cmd = "V"
            pts = self._ntos(y)
        # horizontal line
        elif y == self._lastY:
            cmd = "H"
            pts = self._ntos(x)
        # previous was a moveto
        elif self._lastCommand == "M":
            cmd = None
            pts = " " + pointToString(pt, self._ntos)
        # basic
        else:
            cmd = "L"
            pts = pointToString(pt, self._ntos)
        # write the string
        t = ""
        if cmd:
            t += cmd
            self._lastCommand = cmd
        t += pts
        self._commands.append(t)
        # store for future reference
        self._lastX, self._lastY = pt

    def _curveToOne(self, pt1, pt2, pt3):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.curveTo((10, 20), (30, 40), (50, 60))
        >>> pen._commands
        ['C10 20 30 40 50 60']
        """
        t = "C"
        t += pointToString(pt1, self._ntos) + " "
        t += pointToString(pt2, self._ntos) + " "
        t += pointToString(pt3, self._ntos)
        self._commands.append(t)
        self._lastCommand = "C"
        self._lastX, self._lastY = pt3

    def _qCurveToOne(self, pt1, pt2):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.qCurveTo((10, 20), (30, 40))
        >>> pen._commands
        ['Q10 20 30 40']
        >>> from fontTools.misc.roundTools import otRound
        >>> pen = SVGPathPen(None, ntos=lambda v: str(otRound(v)))
        >>> pen.qCurveTo((3, 3), (7, 5), (11, 4))
        >>> pen._commands
        ['Q3 3 5 4', 'Q7 5 11 4']
        """
        assert pt2 is not None
        t = "Q"
        t += pointToString(pt1, self._ntos) + " "
        t += pointToString(pt2, self._ntos)
        self._commands.append(t)
        self._lastCommand = "Q"
        self._lastX, self._lastY = pt2

    def _closePath(self):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.closePath()
        >>> pen._commands
        ['Z']
        """
        self._commands.append("Z")
        self._lastCommand = "Z"
        self._lastX = self._lastY = None

    def _endPath(self):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.endPath()
        >>> pen._commands
        []
        """
        self._lastCommand = None
        self._lastX = self._lastY = None

    def getCommands(self):
        return "".join(self._commands)


def main(args=None):
    """Generate per-character SVG from font and text"""

    if args is None:
        import sys

        args = sys.argv[1:]

    from
fontTools.ttLib import TTFont import argparse parser = argparse.ArgumentParser( "fonttools pens.svgPathPen", description="Generate SVG from text" ) parser.add_argument("font", metavar="font.ttf", help="Font file.") parser.add_argument("text", metavar="text", nargs="?", help="Text string.") parser.add_argument( "-y", metavar="<number>", help="Face index into a collection to open. Zero based.", ) parser.add_argument( "--glyphs", metavar="whitespace-separated list of glyph names", type=str, help="Glyphs to show. Exclusive with text option", ) parser.add_argument( "--variations", metavar="AXIS=LOC", default="", help="List of space separated locations. A location consist in " "the name of a variation axis, followed by '=' and a number. E.g.: " "wght=700 wdth=80. The default is the location of the base master.", ) options = parser.parse_args(args) fontNumber = int(options.y) if options.y is not None else 0 font = TTFont(options.font, fontNumber=fontNumber) text = options.text glyphs = options.glyphs location = {} for tag_v in options.variations.split(): fields = tag_v.split("=") tag = fields[0].strip() v = float(fields[1]) location[tag] = v hhea = font["hhea"] ascent, descent = hhea.ascent, hhea.descent glyphset = font.getGlyphSet(location=location) cmap = font["cmap"].getBestCmap() if glyphs is not None and text is not None: raise ValueError("Options --glyphs and --text are exclusive") if glyphs is None: glyphs = " ".join(cmap[ord(u)] for u in text) glyphs = glyphs.split() s = "" width = 0 for g in glyphs: glyph = glyphset[g] pen = SVGPathPen(glyphset) glyph.draw(pen) commands = pen.getCommands() s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % ( width, ascent, commands, ) width += glyph.width print('<?xml version="1.0" encoding="UTF-8"?>') print( '<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' % (width, ascent - descent) ) print(s, end="") print("</svg>") if __name__ == "__main__": import sys if len(sys.argv) == 1: import doctest sys.exit(doctest.testmod().failed) sys.exit(main()) PKaZZZ�l�W W !fontTools/pens/t2CharStringPen.py# Copyright (c) 2009 Type Supply LLC # Author: Tal Leming from fontTools.misc.roundTools import otRound, roundFunc from fontTools.misc.psCharStrings import T2CharString from fontTools.pens.basePen import BasePen from fontTools.cffLib.specializer import specializeCommands, commandsToProgram class T2CharStringPen(BasePen): """Pen to draw Type 2 CharStrings. The 'roundTolerance' argument controls the rounding of point coordinates. It is defined as the maximum absolute difference between the original float and the rounded integer value. The default tolerance of 0.5 means that all floats are rounded to integer; a value of 0 disables rounding; values in between will only round floats which are close to their integral part within the tolerated range. 
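    For instance (a sketch showing the tolerance behaviour; the numbers
    are arbitrary)::

        from fontTools.pens.t2CharStringPen import T2CharStringPen

        pen = T2CharStringPen(width=500, glyphSet=None, roundTolerance=0.1)
        pen.moveTo((100.04, 0))   # 0.04 from 100 -> rounded to (100, 0)
        pen.lineTo((100.5, 200))  # 0.5 from the nearest int -> kept as-is
        pen.closePath()
        charstring = pen.getCharString()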
""" def __init__(self, width, glyphSet, roundTolerance=0.5, CFF2=False): super(T2CharStringPen, self).__init__(glyphSet) self.round = roundFunc(roundTolerance) self._CFF2 = CFF2 self._width = width self._commands = [] self._p0 = (0, 0) def _p(self, pt): p0 = self._p0 pt = self._p0 = (self.round(pt[0]), self.round(pt[1])) return [pt[0] - p0[0], pt[1] - p0[1]] def _moveTo(self, pt): self._commands.append(("rmoveto", self._p(pt))) def _lineTo(self, pt): self._commands.append(("rlineto", self._p(pt))) def _curveToOne(self, pt1, pt2, pt3): _p = self._p self._commands.append(("rrcurveto", _p(pt1) + _p(pt2) + _p(pt3))) def _closePath(self): pass def _endPath(self): pass def getCharString(self, private=None, globalSubrs=None, optimize=True): commands = self._commands if optimize: maxstack = 48 if not self._CFF2 else 513 commands = specializeCommands( commands, generalizeFirst=False, maxstack=maxstack ) program = commandsToProgram(commands) if self._width is not None: assert ( not self._CFF2 ), "CFF2 does not allow encoding glyph width in CharString." program.insert(0, otRound(self._width)) if not self._CFF2: program.append("endchar") charString = T2CharString( program=program, private=private, globalSubrs=globalSubrs ) return charString PKaZZZ��à  fontTools/pens/teePen.py"""Pen multiplexing drawing to one or more pens.""" from fontTools.pens.basePen import AbstractPen __all__ = ["TeePen"] class TeePen(AbstractPen): """Pen multiplexing drawing to one or more pens. Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens).""" def __init__(self, *pens): if len(pens) == 1: pens = pens[0] self.pens = pens def moveTo(self, p0): for pen in self.pens: pen.moveTo(p0) def lineTo(self, p1): for pen in self.pens: pen.lineTo(p1) def qCurveTo(self, *points): for pen in self.pens: pen.qCurveTo(*points) def curveTo(self, *points): for pen in self.pens: pen.curveTo(*points) def closePath(self): for pen in self.pens: pen.closePath() def endPath(self): for pen in self.pens: pen.endPath() def addComponent(self, glyphName, transformation): for pen in self.pens: pen.addComponent(glyphName, transformation) if __name__ == "__main__": from fontTools.pens.basePen import _TestPen pen = TeePen(_TestPen(), _TestPen()) pen.moveTo((0, 0)) pen.lineTo((0, 100)) pen.curveTo((50, 75), (60, 50), (50, 25)) pen.closePath() PKaZZZ �����fontTools/pens/transformPen.pyfrom fontTools.pens.filterPen import FilterPen, FilterPointPen __all__ = ["TransformPen", "TransformPointPen"] class TransformPen(FilterPen): """Pen that transforms all coordinates using a Affine transformation, and passes them to another pen. """ def __init__(self, outPen, transformation): """The 'outPen' argument is another pen object. It will receive the transformed coordinates. The 'transformation' argument can either be a six-tuple, or a fontTools.misc.transform.Transform object. 
""" super(TransformPen, self).__init__(outPen) if not hasattr(transformation, "transformPoint"): from fontTools.misc.transform import Transform transformation = Transform(*transformation) self._transformation = transformation self._transformPoint = transformation.transformPoint self._stack = [] def moveTo(self, pt): self._outPen.moveTo(self._transformPoint(pt)) def lineTo(self, pt): self._outPen.lineTo(self._transformPoint(pt)) def curveTo(self, *points): self._outPen.curveTo(*self._transformPoints(points)) def qCurveTo(self, *points): if points[-1] is None: points = self._transformPoints(points[:-1]) + [None] else: points = self._transformPoints(points) self._outPen.qCurveTo(*points) def _transformPoints(self, points): transformPoint = self._transformPoint return [transformPoint(pt) for pt in points] def closePath(self): self._outPen.closePath() def endPath(self): self._outPen.endPath() def addComponent(self, glyphName, transformation): transformation = self._transformation.transform(transformation) self._outPen.addComponent(glyphName, transformation) class TransformPointPen(FilterPointPen): """PointPen that transforms all coordinates using a Affine transformation, and passes them to another PointPen. >>> from fontTools.pens.recordingPen import RecordingPointPen >>> rec = RecordingPointPen() >>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5)) >>> v = iter(rec.value) >>> pen.beginPath(identifier="contour-0") >>> next(v) ('beginPath', (), {'identifier': 'contour-0'}) >>> pen.addPoint((100, 100), "line") >>> next(v) ('addPoint', ((190, 205), 'line', False, None), {}) >>> pen.endPath() >>> next(v) ('endPath', (), {}) >>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0") >>> next(v) ('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'}) """ def __init__(self, outPointPen, transformation): """The 'outPointPen' argument is another point pen object. It will receive the transformed coordinates. The 'transformation' argument can either be a six-tuple, or a fontTools.misc.transform.Transform object. 
""" super().__init__(outPointPen) if not hasattr(transformation, "transformPoint"): from fontTools.misc.transform import Transform transformation = Transform(*transformation) self._transformation = transformation self._transformPoint = transformation.transformPoint def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): self._outPen.addPoint( self._transformPoint(pt), segmentType, smooth, name, **kwargs ) def addComponent(self, baseGlyphName, transformation, **kwargs): transformation = self._transformation.transform(transformation) self._outPen.addComponent(baseGlyphName, transformation, **kwargs) if __name__ == "__main__": from fontTools.pens.basePen import _TestPen pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0)) pen.moveTo((0, 0)) pen.lineTo((0, 100)) pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) pen.closePath() PKaZZZ��}g^.^.fontTools/pens/ttGlyphPen.pyfrom array import array from typing import Any, Callable, Dict, Optional, Tuple from fontTools.misc.fixedTools import MAX_F2DOT14, floatToFixedToFloat from fontTools.misc.loggingTools import LogMixin from fontTools.pens.pointPen import AbstractPointPen from fontTools.misc.roundTools import otRound from fontTools.pens.basePen import LoggingPen, PenError from fontTools.pens.transformPen import TransformPen, TransformPointPen from fontTools.ttLib.tables import ttProgram from fontTools.ttLib.tables._g_l_y_f import flagOnCurve, flagCubic from fontTools.ttLib.tables._g_l_y_f import Glyph from fontTools.ttLib.tables._g_l_y_f import GlyphComponent from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates from fontTools.ttLib.tables._g_l_y_f import dropImpliedOnCurvePoints import math __all__ = ["TTGlyphPen", "TTGlyphPointPen"] class _TTGlyphBasePen: def __init__( self, glyphSet: Optional[Dict[str, Any]], handleOverflowingTransforms: bool = True, ) -> None: """ Construct a new pen. Args: glyphSet (Dict[str, Any]): A glyphset object, used to resolve components. handleOverflowingTransforms (bool): See below. If ``handleOverflowingTransforms`` is True, the components' transform values are checked that they don't overflow the limits of a F2Dot14 number: -2.0 <= v < +2.0. If any transform value exceeds these, the composite glyph is decomposed. An exception to this rule is done for values that are very close to +2.0 (both for consistency with the -2.0 case, and for the relative frequency these occur in real fonts). When almost +2.0 values occur (and all other values are within the range -2.0 <= x <= +2.0), they are clamped to the maximum positive value that can still be encoded as an F2Dot14: i.e. 1.99993896484375. If False, no check is done and all components are translated unmodified into the glyf table, followed by an inevitable ``struct.error`` once an attempt is made to compile them. If both contours and components are present in a glyph, the components are decomposed. """ self.glyphSet = glyphSet self.handleOverflowingTransforms = handleOverflowingTransforms self.init() def _decompose( self, glyphName: str, transformation: Tuple[float, float, float, float, float, float], ): tpen = self.transformPen(self, transformation) getattr(self.glyphSet[glyphName], self.drawMethod)(tpen) def _isClosed(self): """ Check if the current path is closed. 
""" raise NotImplementedError def init(self) -> None: self.points = [] self.endPts = [] self.types = [] self.components = [] def addComponent( self, baseGlyphName: str, transformation: Tuple[float, float, float, float, float, float], identifier: Optional[str] = None, **kwargs: Any, ) -> None: """ Add a sub glyph. """ self.components.append((baseGlyphName, transformation)) def _buildComponents(self, componentFlags): if self.handleOverflowingTransforms: # we can't encode transform values > 2 or < -2 in F2Dot14, # so we must decompose the glyph if any transform exceeds these overflowing = any( s > 2 or s < -2 for (glyphName, transformation) in self.components for s in transformation[:4] ) components = [] for glyphName, transformation in self.components: if glyphName not in self.glyphSet: self.log.warning(f"skipped non-existing component '{glyphName}'") continue if self.points or (self.handleOverflowingTransforms and overflowing): # can't have both coordinates and components, so decompose self._decompose(glyphName, transformation) continue component = GlyphComponent() component.glyphName = glyphName component.x, component.y = (otRound(v) for v in transformation[4:]) # quantize floats to F2Dot14 so we get same values as when decompiled # from a binary glyf table transformation = tuple( floatToFixedToFloat(v, 14) for v in transformation[:4] ) if transformation != (1, 0, 0, 1): if self.handleOverflowingTransforms and any( MAX_F2DOT14 < s <= 2 for s in transformation ): # clamp values ~= +2.0 so we can keep the component transformation = tuple( MAX_F2DOT14 if MAX_F2DOT14 < s <= 2 else s for s in transformation ) component.transform = (transformation[:2], transformation[2:]) component.flags = componentFlags components.append(component) return components def glyph( self, componentFlags: int = 0x04, dropImpliedOnCurves: bool = False, *, round: Callable[[float], int] = otRound, ) -> Glyph: """ Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph. Args: componentFlags: Flags to use for component glyphs. (default: 0x04) dropImpliedOnCurves: Whether to remove implied-oncurve points. (default: False) """ if not self._isClosed(): raise PenError("Didn't close last contour.") components = self._buildComponents(componentFlags) glyph = Glyph() glyph.coordinates = GlyphCoordinates(self.points) glyph.endPtsOfContours = self.endPts glyph.flags = array("B", self.types) self.init() if components: # If both components and contours were present, they have by now # been decomposed by _buildComponents. glyph.components = components glyph.numberOfContours = -1 else: glyph.numberOfContours = len(glyph.endPtsOfContours) glyph.program = ttProgram.Program() glyph.program.fromBytecode(b"") if dropImpliedOnCurves: dropImpliedOnCurvePoints(glyph) glyph.coordinates.toInt(round=round) return glyph class TTGlyphPen(_TTGlyphBasePen, LoggingPen): """ Pen used for drawing to a TrueType glyph. This pen can be used to construct or modify glyphs in a TrueType format font. After using the pen to draw, use the ``.glyph()`` method to retrieve a :py:class:`~._g_l_y_f.Glyph` object representing the glyph. 
""" drawMethod = "draw" transformPen = TransformPen def __init__( self, glyphSet: Optional[Dict[str, Any]] = None, handleOverflowingTransforms: bool = True, outputImpliedClosingLine: bool = False, ) -> None: super().__init__(glyphSet, handleOverflowingTransforms) self.outputImpliedClosingLine = outputImpliedClosingLine def _addPoint(self, pt: Tuple[float, float], tp: int) -> None: self.points.append(pt) self.types.append(tp) def _popPoint(self) -> None: self.points.pop() self.types.pop() def _isClosed(self) -> bool: return (not self.points) or ( self.endPts and self.endPts[-1] == len(self.points) - 1 ) def lineTo(self, pt: Tuple[float, float]) -> None: self._addPoint(pt, flagOnCurve) def moveTo(self, pt: Tuple[float, float]) -> None: if not self._isClosed(): raise PenError('"move"-type point must begin a new contour.') self._addPoint(pt, flagOnCurve) def curveTo(self, *points) -> None: assert len(points) % 2 == 1 for pt in points[:-1]: self._addPoint(pt, flagCubic) # last point is None if there are no on-curve points if points[-1] is not None: self._addPoint(points[-1], 1) def qCurveTo(self, *points) -> None: assert len(points) >= 1 for pt in points[:-1]: self._addPoint(pt, 0) # last point is None if there are no on-curve points if points[-1] is not None: self._addPoint(points[-1], 1) def closePath(self) -> None: endPt = len(self.points) - 1 # ignore anchors (one-point paths) if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1): self._popPoint() return if not self.outputImpliedClosingLine: # if first and last point on this path are the same, remove last startPt = 0 if self.endPts: startPt = self.endPts[-1] + 1 if self.points[startPt] == self.points[endPt]: self._popPoint() endPt -= 1 self.endPts.append(endPt) def endPath(self) -> None: # TrueType contours are always "closed" self.closePath() class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen): """ Point pen used for drawing to a TrueType glyph. This pen can be used to construct or modify glyphs in a TrueType format font. After using the pen to draw, use the ``.glyph()`` method to retrieve a :py:class:`~._g_l_y_f.Glyph` object representing the glyph. """ drawMethod = "drawPoints" transformPen = TransformPointPen def init(self) -> None: super().init() self._currentContourStartIndex = None def _isClosed(self) -> bool: return self._currentContourStartIndex is None def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None: """ Start a new sub path. """ if not self._isClosed(): raise PenError("Didn't close previous contour.") self._currentContourStartIndex = len(self.points) def endPath(self) -> None: """ End the current sub path. """ # TrueType contours are always "closed" if self._isClosed(): raise PenError("Contour is already closed.") if self._currentContourStartIndex == len(self.points): # ignore empty contours self._currentContourStartIndex = None return contourStart = self.endPts[-1] + 1 if self.endPts else 0 self.endPts.append(len(self.points) - 1) self._currentContourStartIndex = None # Resolve types for any cubic segments flags = self.types for i in range(contourStart, len(flags)): if flags[i] == "curve": j = i - 1 if j < contourStart: j = len(flags) - 1 while flags[j] == 0: flags[j] = flagCubic j -= 1 flags[i] = flagOnCurve def addPoint( self, pt: Tuple[float, float], segmentType: Optional[str] = None, smooth: bool = False, name: Optional[str] = None, identifier: Optional[str] = None, **kwargs: Any, ) -> None: """ Add a point to the current sub path. 
""" if self._isClosed(): raise PenError("Can't add a point to a closed contour.") if segmentType is None: self.types.append(0) elif segmentType in ("line", "move"): self.types.append(flagOnCurve) elif segmentType == "qcurve": self.types.append(flagOnCurve) elif segmentType == "curve": self.types.append("curve") else: raise AssertionError(segmentType) self.points.append(pt) PKaZZZ?�/H��fontTools/pens/wxPen.pyfrom fontTools.pens.basePen import BasePen __all__ = ["WxPen"] class WxPen(BasePen): def __init__(self, glyphSet, path=None): BasePen.__init__(self, glyphSet) if path is None: import wx path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath() self.path = path def _moveTo(self, p): self.path.MoveToPoint(*p) def _lineTo(self, p): self.path.AddLineToPoint(*p) def _curveToOne(self, p1, p2, p3): self.path.AddCurveToPoint(*p1 + p2 + p3) def _qCurveToOne(self, p1, p2): self.path.AddQuadCurveToPoint(*p1 + p2) def _closePath(self): self.path.CloseSubpath() PKaZZZB�9;jjfontTools/qu2cu/__init__.py# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .qu2cu import * PKaZZZ���TTfontTools/qu2cu/__main__.pyimport sys from .cli import main if __name__ == "__main__": sys.exit(main()) PKaZZZ236��fontTools/qu2cu/benchmark.py"""Benchmark the qu2cu algorithm performance.""" from .qu2cu import * from fontTools.cu2qu import curve_to_quadratic import random import timeit MAX_ERR = 0.5 NUM_CURVES = 5 def generate_curves(n): points = [ tuple(float(random.randint(0, 2048)) for coord in range(2)) for point in range(1 + 3 * n) ] curves = [] for i in range(n): curves.append(tuple(points[i * 3 : i * 3 + 4])) return curves def setup_quadratic_to_curves(): curves = generate_curves(NUM_CURVES) quadratics = [curve_to_quadratic(curve, MAX_ERR) for curve in curves] return quadratics, MAX_ERR def run_benchmark(module, function, setup_suffix="", repeat=25, number=1): setup_func = "setup_" + function if setup_suffix: print("%s with %s:" % (function, setup_suffix), end="") setup_func += "_" + setup_suffix else: print("%s:" % function, end="") def wrapper(function, setup_func): function = globals()[function] setup_func = globals()[setup_func] def wrapped(): return function(*setup_func()) return wrapped results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number) print("\t%5.1fus" % (min(results) * 1000000.0 / number)) def main(): """Benchmark the qu2cu algorithm performance.""" run_benchmark("qu2cu", "quadratic_to_curves") if __name__ == "__main__": random.seed(1) main() PKaZZZ[OQ)��fontTools/qu2cu/cli.pyimport os import argparse import logging from fontTools.misc.cliTools import makeOutputFileName from fontTools.ttLib import TTFont from fontTools.pens.qu2cuPen import Qu2CuPen from fontTools.pens.ttGlyphPen import TTGlyphPen import fontTools logger = logging.getLogger("fontTools.qu2cu") def _font_to_cubic(input_path, output_path=None, **kwargs): font = TTFont(input_path) logger.info("Converting curves for %s", input_path) stats = {} if kwargs["dump_stats"] else None 
    qu2cu_kwargs = {
        "stats": stats,
        "max_err": kwargs["max_err_em"] * font["head"].unitsPerEm,
        "all_cubic": kwargs["all_cubic"],
    }

    assert "gvar" not in font, "Cannot convert variable font"
    glyphSet = font.getGlyphSet()
    glyphOrder = font.getGlyphOrder()
    glyf = font["glyf"]
    for glyphName in glyphOrder:
        glyph = glyphSet[glyphName]
        ttpen = TTGlyphPen(glyphSet)
        pen = Qu2CuPen(ttpen, **qu2cu_kwargs)
        glyph.draw(pen)
        glyf[glyphName] = ttpen.glyph(dropImpliedOnCurves=True)

    font["head"].glyphDataFormat = 1

    if kwargs["dump_stats"]:
        logger.info("Stats: %s", stats)

    logger.info("Saving %s", output_path)
    font.save(output_path)


def main(args=None):
    """Convert an OpenType font from quadratic to cubic curves"""
    parser = argparse.ArgumentParser(prog="qu2cu")
    parser.add_argument("--version", action="version", version=fontTools.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input TTF source file(s).",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument(
        "-e",
        "--conversion-error",
        type=float,
        metavar="ERROR",
        default=0.001,
        help="maximum approximation error measured in EM (default: 0.001)",
    )
    parser.add_argument(
        "-c",
        "--all-cubic",
        default=False,
        action="store_true",
        help="use only cubic curves in the output, keeping no quadratics",
    )

    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
        "-o",
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=("output filename for the converted TTF."),
    )
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save the converted TTFs",
    )

    options = parser.parse_args(args)

    if not options.verbose:
        level = "WARNING"
    elif options.verbose == 1:
        level = "INFO"
    else:
        level = "DEBUG"
    logging.basicConfig(level=level)

    if len(options.infiles) > 1 and options.output_file:
        parser.error("-o/--output-file can't be used with multiple inputs")

    if options.output_dir:
        output_dir = options.output_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            parser.error("'%s' is not a directory" % output_dir)
        output_paths = [
            os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
    else:
        output_paths = [
            makeOutputFileName(p, overWrite=True, suffix=".cubic")
            for p in options.infiles
        ]

    kwargs = dict(
        dump_stats=options.verbose > 0,
        max_err_em=options.conversion_error,
        all_cubic=options.all_cubic,
    )

    for input_path, output_path in zip(options.infiles, output_paths):
        _font_to_cubic(input_path, output_path, **kwargs)

PKaZZZ��C+00fontTools/qu2cu/qu2cu.py
# cython: language_level=3
# distutils: define_macros=CYTHON_TRACE_NOGIL=1

# Copyright 2023 Google Inc. All Rights Reserved.
# Copyright 2023 Behdad Esfahbod. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
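# Example (a sketch of the public API defined below; a single quadratic
# already within tolerance is expected to come back unchanged, modulo
# int-to-float conversion):
#
#     from fontTools.qu2cu import quadratic_to_curves
#
#     quads = [[(0, 0), (50, 100), (100, 0)]]  # on-, off-, on-curve
#     quadratic_to_curves(quads, max_err=0.5)
#     # -> [((0.0, 0.0), (50.0, 100.0), (100.0, 0.0))]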
try:
    import cython

    COMPILED = cython.compiled
except (AttributeError, ImportError):
    # if cython not installed, use mock module with no-op decorators and types
    from fontTools.misc import cython

    COMPILED = False

from fontTools.misc.bezierTools import splitCubicAtTC
from collections import namedtuple
import math
from typing import (
    List,
    Tuple,
    Union,
)


__all__ = ["quadratic_to_curves"]


# Copied from cu2qu
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
    tolerance=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
    """Check if a cubic Bezier lies within a given distance of the origin.

    "Origin" means *the* origin (0,0), not the start of the curve. Note that
    no checks are made on the start and end positions of the curve; this
    function only checks the inside of the curve.

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.
        tolerance (double): Distance from origin.

    Returns:
        bool: True if the cubic Bezier ``p`` entirely lies within a distance
        ``tolerance`` of the origin, False otherwise.
    """
    # First check p2 then p1, as p2 has higher error early on.
    if abs(p2) <= tolerance and abs(p1) <= tolerance:
        return True

    # Split.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    if abs(mid) > tolerance:
        return False
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    return cubic_farthest_fit_inside(
        p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
    ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)


@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p1_2_3=cython.complex,
)
def elevate_quadratic(p0, p1, p2):
    """Given a quadratic bezier curve, return its degree-elevated cubic."""

    # https://pomax.github.io/bezierinfo/#reordering
    p1_2_3 = p1 * (2 / 3)
    return (
        p0,
        (p0 * (1 / 3) + p1_2_3),
        (p2 * (1 / 3) + p1_2_3),
        p2,
    )


@cython.cfunc
@cython.locals(
    start=cython.int,
    n=cython.int,
    k=cython.int,
    prod_ratio=cython.double,
    sum_ratio=cython.double,
    ratio=cython.double,
    t=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
def merge_curves(curves, start, n):
    """Given a cubic-Bezier spline, reconstruct one cubic-Bezier that has the
    same endpoints and tangents and approximates the spline."""

    # Reconstruct the t values of the cut segments
    prod_ratio = 1.0
    sum_ratio = 1.0
    ts = [1]
    for k in range(1, n):
        ck = curves[start + k]
        c_before = curves[start + k - 1]

        # |t_(k+1) - t_k| / |t_k - t_(k - 1)| = ratio
        assert ck[0] == c_before[3]
        ratio = abs(ck[1] - ck[0]) / abs(c_before[3] - c_before[2])

        prod_ratio *= ratio
        sum_ratio += prod_ratio
        ts.append(sum_ratio)

    # (t(n) - t(n - 1)) / (t_(1) - t(0)) = prod_ratio
    ts = [t / sum_ratio for t in ts[:-1]]

    p0 = curves[start][0]
    p1 = curves[start][1]
    p2 = curves[start + n - 1][2]
    p3 = curves[start + n - 1][3]

    # Build the curve by scaling the control-points.
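    # De Casteljau subdivision at t scales the first sub-curve's leading
    # handle by t, so dividing (p1 - p0) by ts[0] recovers a handle whose
    # tangent matches the spline's start; symmetrically, (p2 - p3) is divided
    # by (1 - ts[-1]) at the other end.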
    p1 = p0 + (p1 - p0) / (ts[0] if ts else 1)
    p2 = p3 + (p2 - p3) / ((1 - ts[-1]) if ts else 1)

    curve = (p0, p1, p2, p3)

    return curve, ts


@cython.locals(
    count=cython.int,
    num_offcurves=cython.int,
    i=cython.int,
    off1=cython.complex,
    off2=cython.complex,
    on=cython.complex,
)
def add_implicit_on_curves(p):
    q = list(p)
    count = 0
    num_offcurves = len(p) - 2
    for i in range(1, num_offcurves):
        off1 = p[i]
        off2 = p[i + 1]
        on = off1 + (off2 - off1) * 0.5
        q.insert(i + 1 + count, on)
        count += 1
    return q


Point = Union[Tuple[float, float], complex]


@cython.locals(
    cost=cython.int,
    is_complex=cython.int,
)
def quadratic_to_curves(
    quads: List[List[Point]],
    max_err: float = 0.5,
    all_cubic: bool = False,
) -> List[Tuple[Point, ...]]:
    """Converts a connecting list of quadratic splines to a list of quadratic
    and cubic curves.

    A quadratic spline is specified as a list of points.  Either each point is
    a 2-tuple of X,Y coordinates, or each point is a complex number with
    real/imaginary components representing X,Y coordinates.

    The first and last points are on-curve points and the rest are off-curve
    points, with an implied on-curve point in the middle between every two
    consecutive off-curve points.

    Returns:
        The output is a list of tuples of points. Points are represented
        in the same format as the input, either as 2-tuples or complex numbers.

        Each tuple is either of length three, for a quadratic curve, or four,
        for a cubic curve.  Each curve's last point is the same as the next
        curve's first point.

    Args:
        quads: quadratic splines

        max_err: absolute error tolerance; defaults to 0.5

        all_cubic: if True, only cubic curves are generated; defaults to False
    """
    is_complex = type(quads[0][0]) is complex
    if not is_complex:
        quads = [[complex(x, y) for (x, y) in p] for p in quads]

    q = [quads[0][0]]
    costs = [1]
    cost = 1
    for p in quads:
        assert q[-1] == p[0]
        for i in range(len(p) - 2):
            cost += 1
            costs.append(cost)
            costs.append(cost)
        qq = add_implicit_on_curves(p)[1:]
        costs.pop()
        q.extend(qq)
        cost += 1
        costs.append(cost)

    curves = spline_to_curves(q, costs, max_err, all_cubic)

    if not is_complex:
        curves = [tuple((c.real, c.imag) for c in curve) for curve in curves]
    return curves


Solution = namedtuple("Solution", ["num_points", "error", "start_index", "is_cubic"])


@cython.locals(
    i=cython.int,
    j=cython.int,
    k=cython.int,
    start=cython.int,
    i_sol_count=cython.int,
    j_sol_count=cython.int,
    this_sol_count=cython.int,
    tolerance=cython.double,
    err=cython.double,
    error=cython.double,
    i_sol_error=cython.double,
    j_sol_error=cython.double,
    all_cubic=cython.int,
    is_cubic=cython.int,
    count=cython.int,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
    v=cython.complex,
    u=cython.complex,
)
def spline_to_curves(q, costs, tolerance=0.5, all_cubic=False):
    """
    q: quadratic spline with alternating on-curve / off-curve points.

    costs: cumulative list of encoding cost of q in terms of number of
      points that need to be encoded.  Implied on-curve points do not
      contribute to the cost.  If all points need to be encoded, then
      costs will be range(1, len(q)+1).
    """

    assert len(q) >= 3, "quadratic spline requires at least 3 points"

    # Elevate quadratic segments to cubic
    elevated_quadratics = [
        elevate_quadratic(*q[i : i + 3]) for i in range(0, len(q) - 2, 2)
    ]

    # Find sharp corners; they have to be oncurves for sure.
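    # A joint is a sharp corner when the detour through it exceeds the direct
    # distance by more than the tolerance: |p1 - p0| + |p2 - p1| >= |p2 - p0|
    # always holds (triangle inequality), with near-equality only when the
    # incoming handle, the on-curve point, and the outgoing handle are close
    # to collinear.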
    forced = set()
    for i in range(1, len(elevated_quadratics)):
        p0 = elevated_quadratics[i - 1][2]
        p1 = elevated_quadratics[i][0]
        p2 = elevated_quadratics[i][1]
        if abs(p1 - p0) + abs(p2 - p1) > tolerance + abs(p2 - p0):
            forced.add(i)

    # Dynamic-Programming to find the solution with fewest number of
    # cubic curves, and within those the one with smallest error.
    sols = [Solution(0, 0, 0, False)]
    impossible = Solution(len(elevated_quadratics) * 3 + 1, 0, 1, False)
    start = 0
    for i in range(1, len(elevated_quadratics) + 1):
        best_sol = impossible
        for j in range(start, i):
            j_sol_count, j_sol_error = sols[j].num_points, sols[j].error

            if not all_cubic:
                # Solution with quadratics between j:i
                this_count = costs[2 * i - 1] - costs[2 * j] + 1
                i_sol_count = j_sol_count + this_count
                i_sol_error = j_sol_error
                i_sol = Solution(i_sol_count, i_sol_error, i - j, False)
                if i_sol < best_sol:
                    best_sol = i_sol

                if this_count <= 3:
                    # Can't get any better than this in the path below
                    continue

            # Fit elevated_quadratics[j:i] into one cubic
            try:
                curve, ts = merge_curves(elevated_quadratics, j, i - j)
            except ZeroDivisionError:
                continue

            # Now reconstruct the segments from the fitted curve
            reconstructed_iter = splitCubicAtTC(*curve, *ts)
            reconstructed = []

            # Knot errors
            error = 0
            for k, reconst in enumerate(reconstructed_iter):
                orig = elevated_quadratics[j + k]
                err = abs(reconst[3] - orig[3])
                error = max(error, err)
                if error > tolerance:
                    break
                reconstructed.append(reconst)
            if error > tolerance:
                # Not feasible
                continue

            # Interior errors
            for k, reconst in enumerate(reconstructed):
                orig = elevated_quadratics[j + k]
                p0, p1, p2, p3 = tuple(v - u for v, u in zip(reconst, orig))

                if not cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
                    error = tolerance + 1
                    break
            if error > tolerance:
                # Not feasible
                continue

            # Save best solution
            i_sol_count = j_sol_count + 3
            i_sol_error = max(j_sol_error, error)
            i_sol = Solution(i_sol_count, i_sol_error, i - j, True)
            if i_sol < best_sol:
                best_sol = i_sol

            if i_sol_count == 3:
                # Can't get any better than this
                break

        sols.append(best_sol)
        if i in forced:
            start = i

    # Reconstruct solution
    splits = []
    cubic = []
    i = len(sols) - 1
    while i:
        count, is_cubic = sols[i].start_index, sols[i].is_cubic
        splits.append(i)
        cubic.append(is_cubic)
        i -= count
    curves = []
    j = 0
    for i, is_cubic in reversed(list(zip(splits, cubic))):
        if is_cubic:
            curves.append(merge_curves(elevated_quadratics, j, i - j)[0])
        else:
            for k in range(j, i):
                curves.append(q[k * 2 : k * 2 + 3])
        j = i

    return curves


def main():
    from fontTools.cu2qu.benchmark import generate_curve
    from fontTools.cu2qu import curve_to_quadratic

    tolerance = 0.05
    reconstruct_tolerance = tolerance * 1
    curve = generate_curve()
    quadratics = curve_to_quadratic(curve, tolerance)
    print(
        "cu2qu tolerance %g. qu2cu tolerance %g." % (tolerance, reconstruct_tolerance)
    )
    print("One random cubic turned into %d quadratics." % len(quadratics))
    curves = quadratic_to_curves([quadratics], reconstruct_tolerance)
    print("Those quadratics turned back into %d cubics. " % len(curves))
    print("Original curve:", curve)
    print("Reconstructed curve(s):", curves)


if __name__ == "__main__":
    main()

# --- fontTools/subset/__init__.py ---
# Copyright 2013 Google, Inc. All Rights Reserved.
# # Google Author(s): Behdad Esfahbod from fontTools import config from fontTools.misc.roundTools import otRound from fontTools import ttLib from fontTools.ttLib.tables import otTables from fontTools.ttLib.tables.otBase import USE_HARFBUZZ_REPACKER from fontTools.otlLib.maxContextCalc import maxCtxFont from fontTools.pens.basePen import NullPen from fontTools.misc.loggingTools import Timer from fontTools.misc.cliTools import makeOutputFileName from fontTools.subset.util import _add_method, _uniq_sort from fontTools.subset.cff import * from fontTools.subset.svg import * from fontTools.varLib import varStore # for subset_varidxes from fontTools.ttLib.tables._n_a_m_e import NameRecordVisitor import sys import struct import array import logging from collections import Counter, defaultdict from functools import reduce from types import MethodType __usage__ = "pyftsubset font-file [glyph...] [--option=value]..." __doc__ = ( """\ pyftsubset -- OpenType font subsetter and optimizer pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) font file. The subsetted glyph set is based on the specified glyphs or characters, and specified OpenType layout features. The tool also performs some size-reducing optimizations, aimed for using subset fonts as webfonts. Individual optimizations can be enabled or disabled, and are enabled by default when they are safe. Usage: """ + __usage__ + """ At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, --text, --text-file, --unicodes, or --unicodes-file, must be specified. Args: font-file The input font file. glyph Specify one or more glyph identifiers to include in the subset. Must be PS glyph names, or the special string '*' to keep the entire glyph set. Initial glyph set specification ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ These options populate the initial glyph set. Same option can appear multiple times, and the results are accummulated. --gids=<NNN>[,<NNN>...] Specify comma/whitespace-separated list of glyph IDs or ranges as decimal numbers. For example, --gids=10-12,14 adds glyphs with numbers 10, 11, 12, and 14. --gids-file=<path> Like --gids but reads from a file. Anything after a '#' on any line is ignored as comments. --glyphs=<glyphname>[,<glyphname>...] Specify comma/whitespace-separated PS glyph names to add to the subset. Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc that are accepted on the command line. The special string '*' will keep the entire glyph set. --glyphs-file=<path> Like --glyphs but reads from a file. Anything after a '#' on any line is ignored as comments. --text=<text> Specify characters to include in the subset, as UTF-8 string. --text-file=<path> Like --text but reads from a file. Newline character are not added to the subset. --unicodes=<XXXX>[,<XXXX>...] Specify comma/whitespace-separated list of Unicode codepoints or ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. For example, --unicodes=41-5a,61-7a adds ASCII letters, so does the more verbose --unicodes=U+0041-005A,U+0061-007A. The special strings '*' will choose all Unicode characters mapped by the font. --unicodes-file=<path> Like --unicodes, but reads from a file. Anything after a '#' on any line in the file is ignored as comments. --ignore-missing-glyphs Do not fail if some requested glyphs or gids are not available in the font. 
--no-ignore-missing-glyphs Stop and fail if some requested glyphs or gids are not available in the font. [default] --ignore-missing-unicodes [default] Do not fail if some requested Unicode characters (including those indirectly specified using --text or --text-file) are not available in the font. --no-ignore-missing-unicodes Stop and fail if some requested Unicode characters are not available in the font. Note the default discrepancy between ignoring missing glyphs versus unicodes. This is for historical reasons and in the future --no-ignore-missing-unicodes might become default. Other options ^^^^^^^^^^^^^ For the other options listed below, to see the current value of the option, pass a value of '?' to it, with or without a '='. Examples:: $ pyftsubset --glyph-names? Current setting for 'glyph-names' is: False $ ./pyftsubset --name-IDs=? Current setting for 'name-IDs' is: [0, 1, 2, 3, 4, 5, 6] $ ./pyftsubset --hinting? --no-hinting --hinting? Current setting for 'hinting' is: True Current setting for 'hinting' is: False Output options ^^^^^^^^^^^^^^ --output-file=<path> The output font file. If not specified, the subsetted font will be saved in as font-file.subset. --flavor=<type> Specify flavor of output font file. May be 'woff' or 'woff2'. Note that WOFF2 requires the Brotli Python extension, available at https://github.com/google/brotli --with-zopfli Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 % smaller than pure zlib, but the compression speed is much slower. The Zopfli Python bindings are available at: https://pypi.python.org/pypi/zopfli --harfbuzz-repacker By default, we serialize GPOS/GSUB using the HarfBuzz Repacker when uharfbuzz can be imported and is successful, otherwise fall back to the pure-python serializer. Set the option to force using the HarfBuzz Repacker (raises an error if uharfbuzz can't be found or fails). --no-harfbuzz-repacker Always use the pure-python serializer even if uharfbuzz is available. Glyph set expansion ^^^^^^^^^^^^^^^^^^^ These options control how additional glyphs are added to the subset. --retain-gids Retain glyph indices; just empty glyphs not needed in-place. --notdef-glyph Add the '.notdef' glyph to the subset (ie, keep it). [default] --no-notdef-glyph Drop the '.notdef' glyph unless specified in the glyph set. This saves a few bytes, but is not possible for Postscript-flavored fonts, as those require '.notdef'. For TrueType-flavored fonts, this works fine as long as no unsupported glyphs are requested from the font. --notdef-outline Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is used when glyphs not supported by the font are to be shown. It is not needed otherwise. --no-notdef-outline When including a '.notdef' glyph, remove its outline. This saves a few bytes. [default] --recommended-glyphs Add glyphs 0, 1, 2, and 3 to the subset, as recommended for TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. Some legacy software might require this, but no modern system does. --no-recommended-glyphs Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in glyph set. [default] --no-layout-closure Do not expand glyph set to add glyphs produced by OpenType layout features. Instead, OpenType layout features will be subset to only rules that are relevant to the otherwise-specified glyph set. --layout-features[+|-]=<feature>[,<feature>...] Specify (=), add to (+=) or exclude from (-=) the comma-separated set of OpenType layout feature tags that will be preserved. 
Glyph variants used by the preserved features are added to the specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', 'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt', 'rlig', 'rvrn', and all features required for script shaping are preserved. To see the full list, try '--layout-features=?'. Use '*' to keep all features. Multiple --layout-features options can be provided if necessary. Examples: --layout-features+=onum,pnum,ss01 * Keep the default set of features and 'onum', 'pnum', 'ss01'. --layout-features-='mark','mkmk' * Keep the default set of features but drop 'mark' and 'mkmk'. --layout-features='kern' * Only keep the 'kern' feature, drop all others. --layout-features='' * Drop all features. --layout-features='*' * Keep all features. --layout-features+=aalt --layout-features-=vrt2 * Keep default set of features plus 'aalt', but drop 'vrt2'. --layout-scripts[+|-]=<script>[,<script>...] Specify (=), add to (+=) or exclude from (-=) the comma-separated set of OpenType layout script tags that will be preserved. LangSys tags can be appended to script tag, separated by '.', for example: 'arab.dflt,arab.URD,latn.TRK'. By default all scripts are retained ('*'). Hinting options ^^^^^^^^^^^^^^^ --hinting Keep hinting [default] --no-hinting Drop glyph-specific hinting and font-wide hinting tables, as well as remove hinting-related bits and pieces from other tables (eg. GPOS). See --hinting-tables for list of tables that are dropped by default. Instructions and hints are stripped from 'glyf' and 'CFF ' tables respectively. This produces (sometimes up to 30%) smaller fonts that are suitable for extremely high-resolution systems, like high-end mobile devices and retina displays. Optimization options ^^^^^^^^^^^^^^^^^^^^ --desubroutinize Remove CFF use of subroutinizes. Subroutinization is a way to make CFF fonts smaller. For small subsets however, desubroutinizing might make the font smaller. It has even been reported that desubroutinized CFF fonts compress better (produce smaller output) WOFF and WOFF2 fonts. Also see note under --no-hinting. --no-desubroutinize [default] Leave CFF subroutinizes as is, only throw away unused subroutinizes. Font table options ^^^^^^^^^^^^^^^^^^ --drop-tables[+|-]=<table>[,<table>...] Specify (=), add to (+=) or exclude from (-=) the comma-separated set of tables that will be be dropped. By default, the following tables are dropped: 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'PCLT', 'LTSH' and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill'. The tool will attempt to subset the remaining tables. Examples: --drop-tables-=BASE * Drop the default set of tables but keep 'BASE'. --drop-tables+=GSUB * Drop the default set of tables and 'GSUB'. --drop-tables=DSIG * Only drop the 'DSIG' table, keep all others. --drop-tables= * Keep all tables. --no-subset-tables+=<table>[,<table>...] Add to the set of tables that will not be subsetted. By default, the following tables are included in this list, as they do not need subsetting (ignore the fact that 'loca' is listed here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', 'DSIG', 'CPAL', 'MVAR', 'cvar', 'STAT'. By default, tables that the tool does not know how to subset and are not specified here will be dropped from the font, unless --passthrough-tables option is passed. Example: --no-subset-tables+=FFTM * Keep 'FFTM' table in the font by preventing subsetting. 
--passthrough-tables Do not drop tables that the tool does not know how to subset. --no-passthrough-tables Tables that the tool does not know how to subset and are not specified in --no-subset-tables will be dropped from the font. [default] --hinting-tables[-]=<table>[,<table>...] Specify (=), add to (+=) or exclude from (-=) the list of font-wide hinting tables that will be dropped if --no-hinting is specified. Examples: --hinting-tables-=VDMX * Drop font-wide hinting tables except 'VDMX'. --hinting-tables= * Keep all font-wide hinting tables (but strip hints from glyphs). --legacy-kern Keep TrueType 'kern' table even when OpenType 'GPOS' is available. --no-legacy-kern Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] Font naming options ^^^^^^^^^^^^^^^^^^^ These options control what is retained in the 'name' table. For numerical codes, see: http://www.microsoft.com/typography/otspec/name.htm --name-IDs[+|-]=<nameID>[,<nameID>...] Specify (=), add to (+=) or exclude from (-=) the set of 'name' table entry nameIDs that will be preserved. By default, only nameIDs between 0 and 6 are preserved, the rest are dropped. Use '*' to keep all entries. Examples: --name-IDs+=7,8,9 * Also keep Trademark, Manufacturer and Designer name entries. --name-IDs= * Drop all 'name' table entries. --name-IDs=* * keep all 'name' table entries --name-legacy Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). XXX Note: This might be needed for some fonts that have no Unicode name entires for English. See: https://github.com/fonttools/fonttools/issues/146 --no-name-legacy Drop legacy (non-Unicode) 'name' table entries [default] --name-languages[+|-]=<langID>[,<langID>] Specify (=), add to (+=) or exclude from (-=) the set of 'name' table langIDs that will be preserved. By default only records with langID 0x0409 (English) are preserved. Use '*' to keep all langIDs. --obfuscate-names Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, and 6 with dummy strings (it is still fully functional as webfont). Glyph naming and encoding options ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ --glyph-names Keep PS glyph names in TT-flavored fonts. In general glyph names are not needed for correct use of the font. However, some PDF generators and PDF viewers might rely on glyph names to extract Unicode text from PDF documents. --no-glyph-names Drop PS glyph names in TT-flavored fonts, by using 'post' table version 3.0. [default] --legacy-cmap Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). --no-legacy-cmap Drop the legacy 'cmap' subtables. [default] --symbol-cmap Keep the 3.0 symbol 'cmap'. --no-symbol-cmap Drop the 3.0 symbol 'cmap'. [default] Other font-specific options ^^^^^^^^^^^^^^^^^^^^^^^^^^^ --recalc-bounds Recalculate font bounding boxes. --no-recalc-bounds Keep original font bounding boxes. This is faster and still safe for all practical purposes. [default] --recalc-timestamp Set font 'modified' timestamp to current time. --no-recalc-timestamp Do not modify font 'modified' timestamp. [default] --canonical-order Order tables as recommended in the OpenType standard. This is not required by the standard, nor by any known implementation. --no-canonical-order Keep original order of font tables. This is faster. [default] --prune-unicode-ranges Update the 'OS/2 ulUnicodeRange*' bits after subsetting. 
The Unicode ranges defined in the OpenType specification v1.7 are intersected with the Unicode codepoints specified in the font's Unicode 'cmap' subtables: when no overlap is found, the bit will be switched off. However, it will *not* be switched on if an intersection is found. [default] --no-prune-unicode-ranges Don't change the 'OS/2 ulUnicodeRange*' bits. --prune-codepage-ranges Update the 'OS/2 ulCodePageRange*' bits after subsetting. [default] --no-prune-codepage-ranges Don't change the 'OS/2 ulCodePageRange*' bits. --recalc-average-width Update the 'OS/2 xAvgCharWidth' field after subsetting. --no-recalc-average-width Don't change the 'OS/2 xAvgCharWidth' field. [default] --recalc-max-context Update the 'OS/2 usMaxContext' field after subsetting. --no-recalc-max-context Don't change the 'OS/2 usMaxContext' field. [default] --font-number=<number> Select font number for TrueType Collection (.ttc/.otc), starting from 0. --pretty-svg When subsetting SVG table, use lxml pretty_print=True option to indent the XML output (only recommended for debugging purposes). Application options ^^^^^^^^^^^^^^^^^^^ --verbose Display verbose information of the subsetting process. --timing Display detailed timing information of the subsetting process. --xml Display the TTX XML representation of subsetted font. Example ^^^^^^^ Produce a subset containing the characters ' !"#$%' without performing size-reducing optimizations:: $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ --layout-features=* --glyph-names --symbol-cmap --legacy-cmap \\ --notdef-glyph --notdef-outline --recommended-glyphs \\ --name-IDs=* --name-legacy --name-languages=* """ ) log = logging.getLogger("fontTools.subset") def _log_glyphs(self, glyphs, font=None): self.info("Glyph names: %s", sorted(glyphs)) if font: reverseGlyphMap = font.getReverseGlyphMap() self.info("Glyph IDs: %s", sorted(reverseGlyphMap[g] for g in glyphs)) # bind "glyphs" function to 'log' object log.glyphs = MethodType(_log_glyphs, log) # I use a different timing channel so I can configure it separately from the # main module's logger timer = Timer(logger=logging.getLogger("fontTools.subset.timer")) def _dict_subset(d, glyphs): return {g: d[g] for g in glyphs} def _list_subset(l, indices): count = len(l) return [l[i] for i in indices if i < count] @_add_method(otTables.Coverage) def intersect(self, glyphs): """Returns ascending list of matching coverage values.""" return [i for i, g in enumerate(self.glyphs) if g in glyphs] @_add_method(otTables.Coverage) def intersect_glyphs(self, glyphs): """Returns set of intersecting glyphs.""" return set(g for g in self.glyphs if g in glyphs) @_add_method(otTables.Coverage) def subset(self, glyphs): """Returns ascending list of remaining coverage values.""" indices = self.intersect(glyphs) self.glyphs = [g for g in self.glyphs if g in glyphs] return indices @_add_method(otTables.Coverage) def remap(self, coverage_map): """Remaps coverage.""" self.glyphs = [self.glyphs[i] for i in coverage_map] @_add_method(otTables.ClassDef) def intersect(self, glyphs): """Returns ascending list of matching class values.""" return _uniq_sort( ([0] if any(g not in self.classDefs for g in glyphs) else []) + [v for g, v in self.classDefs.items() if g in glyphs] ) @_add_method(otTables.ClassDef) def intersect_class(self, glyphs, klass): """Returns set of glyphs matching class.""" if klass == 0: return set(g for g in glyphs if g not in self.classDefs) return set(g for g, v in self.classDefs.items() if v == klass and g in glyphs) 
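# Worked example of the class-0 convention (illustrative values): any glyph
# absent from classDefs implicitly belongs to class 0, so with
# classDefs = {"A": 1, "B": 2} and glyphs = {"A", "C"}, intersect() returns
# [0, 1] -- "C" hits the implicit class 0 and "A" hits class 1.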
@_add_method(otTables.ClassDef) def subset(self, glyphs, remap=False, useClass0=True): """Returns ascending list of remaining classes.""" self.classDefs = {g: v for g, v in self.classDefs.items() if g in glyphs} # Note: while class 0 has the special meaning of "not matched", # if no glyph will ever /not match/, we can optimize class 0 out too. # Only do this if allowed. indices = _uniq_sort( ( [0] if ((not useClass0) or any(g not in self.classDefs for g in glyphs)) else [] ) + list(self.classDefs.values()) ) if remap: self.remap(indices) return indices @_add_method(otTables.ClassDef) def remap(self, class_map): """Remaps classes.""" self.classDefs = {g: class_map.index(v) for g, v in self.classDefs.items()} @_add_method(otTables.SingleSubst) def closure_glyphs(self, s, cur_glyphs): s.glyphs.update(v for g, v in self.mapping.items() if g in cur_glyphs) @_add_method(otTables.SingleSubst) def subset_glyphs(self, s): self.mapping = { g: v for g, v in self.mapping.items() if g in s.glyphs and v in s.glyphs } return bool(self.mapping) @_add_method(otTables.MultipleSubst) def closure_glyphs(self, s, cur_glyphs): for glyph, subst in self.mapping.items(): if glyph in cur_glyphs: s.glyphs.update(subst) @_add_method(otTables.MultipleSubst) def subset_glyphs(self, s): self.mapping = { g: v for g, v in self.mapping.items() if g in s.glyphs and all(sub in s.glyphs for sub in v) } return bool(self.mapping) @_add_method(otTables.AlternateSubst) def closure_glyphs(self, s, cur_glyphs): s.glyphs.update(*(vlist for g, vlist in self.alternates.items() if g in cur_glyphs)) @_add_method(otTables.AlternateSubst) def subset_glyphs(self, s): self.alternates = { g: [v for v in vlist if v in s.glyphs] for g, vlist in self.alternates.items() if g in s.glyphs and any(v in s.glyphs for v in vlist) } return bool(self.alternates) @_add_method(otTables.LigatureSubst) def closure_glyphs(self, s, cur_glyphs): s.glyphs.update( *( [seq.LigGlyph for seq in seqs if all(c in s.glyphs for c in seq.Component)] for g, seqs in self.ligatures.items() if g in cur_glyphs ) ) @_add_method(otTables.LigatureSubst) def subset_glyphs(self, s): self.ligatures = {g: v for g, v in self.ligatures.items() if g in s.glyphs} self.ligatures = { g: [ seq for seq in seqs if seq.LigGlyph in s.glyphs and all(c in s.glyphs for c in seq.Component) ] for g, seqs in self.ligatures.items() } self.ligatures = {g: v for g, v in self.ligatures.items() if v} return bool(self.ligatures) @_add_method(otTables.ReverseChainSingleSubst) def closure_glyphs(self, s, cur_glyphs): if self.Format == 1: indices = self.Coverage.intersect(cur_glyphs) if not indices or not all( c.intersect(s.glyphs) for c in self.LookAheadCoverage + self.BacktrackCoverage ): return s.glyphs.update(self.Substitute[i] for i in indices) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.ReverseChainSingleSubst) def subset_glyphs(self, s): if self.Format == 1: indices = self.Coverage.subset(s.glyphs) self.Substitute = _list_subset(self.Substitute, indices) # Now drop rules generating glyphs we don't want indices = [i for i, sub in enumerate(self.Substitute) if sub in s.glyphs] self.Substitute = _list_subset(self.Substitute, indices) self.Coverage.remap(indices) self.GlyphCount = len(self.Substitute) return bool( self.GlyphCount and all( c.subset(s.glyphs) for c in self.LookAheadCoverage + self.BacktrackCoverage ) ) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.Device) def is_hinting(self): return self.DeltaFormat in (1, 2, 3) 
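# DeltaFormats 1-3 store ppem-indexed hinting deltas; DeltaFormat 0x8000
# (VariationIndex, used by variable fonts) is deliberately not treated as
# hinting here, so such Device records survive --no-hinting.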
@_add_method(otTables.ValueRecord) def prune_hints(self): for name in ["XPlaDevice", "YPlaDevice", "XAdvDevice", "YAdvDevice"]: v = getattr(self, name, None) if v is not None and v.is_hinting(): delattr(self, name) @_add_method(otTables.SinglePos) def subset_glyphs(self, s): if self.Format == 1: return len(self.Coverage.subset(s.glyphs)) elif self.Format == 2: indices = self.Coverage.subset(s.glyphs) values = self.Value count = len(values) self.Value = [values[i] for i in indices if i < count] self.ValueCount = len(self.Value) return bool(self.ValueCount) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.SinglePos) def prune_post_subset(self, font, options): if self.Value is None: assert self.ValueFormat == 0 return True # Shrink ValueFormat if self.Format == 1: if not options.hinting: self.Value.prune_hints() self.ValueFormat = self.Value.getEffectiveFormat() elif self.Format == 2: if None in self.Value: assert self.ValueFormat == 0 assert all(v is None for v in self.Value) else: if not options.hinting: for v in self.Value: v.prune_hints() self.ValueFormat = reduce( int.__or__, [v.getEffectiveFormat() for v in self.Value], 0 ) # Downgrade to Format 1 if all ValueRecords are the same if self.Format == 2 and all(v == self.Value[0] for v in self.Value): self.Format = 1 self.Value = self.Value[0] if self.ValueFormat != 0 else None del self.ValueCount return True @_add_method(otTables.PairPos) def subset_glyphs(self, s): if self.Format == 1: indices = self.Coverage.subset(s.glyphs) pairs = self.PairSet count = len(pairs) self.PairSet = [pairs[i] for i in indices if i < count] for p in self.PairSet: p.PairValueRecord = [ r for r in p.PairValueRecord if r.SecondGlyph in s.glyphs ] p.PairValueCount = len(p.PairValueRecord) # Remove empty pairsets indices = [i for i, p in enumerate(self.PairSet) if p.PairValueCount] self.Coverage.remap(indices) self.PairSet = _list_subset(self.PairSet, indices) self.PairSetCount = len(self.PairSet) return bool(self.PairSetCount) elif self.Format == 2: class1_map = [ c for c in self.ClassDef1.subset( s.glyphs.intersection(self.Coverage.glyphs), remap=True ) if c < self.Class1Count ] class2_map = [ c for c in self.ClassDef2.subset(s.glyphs, remap=True, useClass0=False) if c < self.Class2Count ] self.Class1Record = [self.Class1Record[i] for i in class1_map] for c in self.Class1Record: c.Class2Record = [c.Class2Record[i] for i in class2_map] self.Class1Count = len(class1_map) self.Class2Count = len(class2_map) # If only Class2 0 left, no need to keep anything. 
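        # (Class 0 is the catch-all "everything else" class, so if it is the
        # only second class left there are no distinguishing pairs to keep;
        # hence the Class2Count > 1 test below.)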
return bool( self.Class1Count and (self.Class2Count > 1) and self.Coverage.subset(s.glyphs) ) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.PairPos) def prune_post_subset(self, font, options): if not options.hinting: attr1, attr2 = { 1: ("PairSet", "PairValueRecord"), 2: ("Class1Record", "Class2Record"), }[self.Format] self.ValueFormat1 = self.ValueFormat2 = 0 for row in getattr(self, attr1): for r in getattr(row, attr2): if r.Value1: r.Value1.prune_hints() self.ValueFormat1 |= r.Value1.getEffectiveFormat() if r.Value2: r.Value2.prune_hints() self.ValueFormat2 |= r.Value2.getEffectiveFormat() return bool(self.ValueFormat1 | self.ValueFormat2) @_add_method(otTables.CursivePos) def subset_glyphs(self, s): if self.Format == 1: indices = self.Coverage.subset(s.glyphs) records = self.EntryExitRecord count = len(records) self.EntryExitRecord = [records[i] for i in indices if i < count] self.EntryExitCount = len(self.EntryExitRecord) return bool(self.EntryExitCount) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.Anchor) def prune_hints(self): if self.Format == 2: self.Format = 1 elif self.Format == 3: for name in ("XDeviceTable", "YDeviceTable"): v = getattr(self, name, None) if v is not None and v.is_hinting(): setattr(self, name, None) if self.XDeviceTable is None and self.YDeviceTable is None: self.Format = 1 @_add_method(otTables.CursivePos) def prune_post_subset(self, font, options): if not options.hinting: for rec in self.EntryExitRecord: if rec.EntryAnchor: rec.EntryAnchor.prune_hints() if rec.ExitAnchor: rec.ExitAnchor.prune_hints() return True @_add_method(otTables.MarkBasePos) def subset_glyphs(self, s): if self.Format == 1: mark_indices = self.MarkCoverage.subset(s.glyphs) self.MarkArray.MarkRecord = _list_subset( self.MarkArray.MarkRecord, mark_indices ) self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) base_indices = self.BaseCoverage.subset(s.glyphs) self.BaseArray.BaseRecord = _list_subset( self.BaseArray.BaseRecord, base_indices ) self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord) # Prune empty classes class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) self.ClassCount = len(class_indices) for m in self.MarkArray.MarkRecord: m.Class = class_indices.index(m.Class) for b in self.BaseArray.BaseRecord: b.BaseAnchor = _list_subset(b.BaseAnchor, class_indices) return bool( self.ClassCount and self.MarkArray.MarkCount and self.BaseArray.BaseCount ) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.MarkBasePos) def prune_post_subset(self, font, options): if not options.hinting: for m in self.MarkArray.MarkRecord: if m.MarkAnchor: m.MarkAnchor.prune_hints() for b in self.BaseArray.BaseRecord: for a in b.BaseAnchor: if a: a.prune_hints() return True @_add_method(otTables.MarkLigPos) def subset_glyphs(self, s): if self.Format == 1: mark_indices = self.MarkCoverage.subset(s.glyphs) self.MarkArray.MarkRecord = _list_subset( self.MarkArray.MarkRecord, mark_indices ) self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) ligature_indices = self.LigatureCoverage.subset(s.glyphs) self.LigatureArray.LigatureAttach = _list_subset( self.LigatureArray.LigatureAttach, ligature_indices ) self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach) # Prune empty classes class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) self.ClassCount = len(class_indices) for m in self.MarkArray.MarkRecord: m.Class = class_indices.index(m.Class) for l in 
self.LigatureArray.LigatureAttach: if l is None: continue for c in l.ComponentRecord: c.LigatureAnchor = _list_subset(c.LigatureAnchor, class_indices) return bool( self.ClassCount and self.MarkArray.MarkCount and self.LigatureArray.LigatureCount ) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.MarkLigPos) def prune_post_subset(self, font, options): if not options.hinting: for m in self.MarkArray.MarkRecord: if m.MarkAnchor: m.MarkAnchor.prune_hints() for l in self.LigatureArray.LigatureAttach: if l is None: continue for c in l.ComponentRecord: for a in c.LigatureAnchor: if a: a.prune_hints() return True @_add_method(otTables.MarkMarkPos) def subset_glyphs(self, s): if self.Format == 1: mark1_indices = self.Mark1Coverage.subset(s.glyphs) self.Mark1Array.MarkRecord = _list_subset( self.Mark1Array.MarkRecord, mark1_indices ) self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord) mark2_indices = self.Mark2Coverage.subset(s.glyphs) self.Mark2Array.Mark2Record = _list_subset( self.Mark2Array.Mark2Record, mark2_indices ) self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record) # Prune empty classes class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord) self.ClassCount = len(class_indices) for m in self.Mark1Array.MarkRecord: m.Class = class_indices.index(m.Class) for b in self.Mark2Array.Mark2Record: b.Mark2Anchor = _list_subset(b.Mark2Anchor, class_indices) return bool( self.ClassCount and self.Mark1Array.MarkCount and self.Mark2Array.MarkCount ) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.MarkMarkPos) def prune_post_subset(self, font, options): if not options.hinting: for m in self.Mark1Array.MarkRecord: if m.MarkAnchor: m.MarkAnchor.prune_hints() for b in self.Mark2Array.Mark2Record: for m in b.Mark2Anchor: if m: m.prune_hints() return True @_add_method( otTables.SingleSubst, otTables.MultipleSubst, otTables.AlternateSubst, otTables.LigatureSubst, otTables.ReverseChainSingleSubst, otTables.SinglePos, otTables.PairPos, otTables.CursivePos, otTables.MarkBasePos, otTables.MarkLigPos, otTables.MarkMarkPos, ) def subset_lookups(self, lookup_indices): pass @_add_method( otTables.SingleSubst, otTables.MultipleSubst, otTables.AlternateSubst, otTables.LigatureSubst, otTables.ReverseChainSingleSubst, otTables.SinglePos, otTables.PairPos, otTables.CursivePos, otTables.MarkBasePos, otTables.MarkLigPos, otTables.MarkMarkPos, ) def collect_lookups(self): return [] @_add_method( otTables.SingleSubst, otTables.MultipleSubst, otTables.AlternateSubst, otTables.LigatureSubst, otTables.ReverseChainSingleSubst, otTables.ContextSubst, otTables.ChainContextSubst, otTables.ContextPos, otTables.ChainContextPos, ) def prune_post_subset(self, font, options): return True @_add_method( otTables.SingleSubst, otTables.AlternateSubst, otTables.ReverseChainSingleSubst ) def may_have_non_1to1(self): return False @_add_method( otTables.MultipleSubst, otTables.LigatureSubst, otTables.ContextSubst, otTables.ChainContextSubst, ) def may_have_non_1to1(self): return True @_add_method( otTables.ContextSubst, otTables.ChainContextSubst, otTables.ContextPos, otTables.ChainContextPos, ) def __subset_classify_context(self): class ContextHelper(object): def __init__(self, klass, Format): if klass.__name__.endswith("Subst"): Typ = "Sub" Type = "Subst" else: Typ = "Pos" Type = "Pos" if klass.__name__.startswith("Chain"): Chain = "Chain" InputIdx = 1 DataLen = 3 else: Chain = "" InputIdx = 0 DataLen = 1 ChainTyp = Chain + Typ self.Typ = Typ self.Type = Type 
self.Chain = Chain self.ChainTyp = ChainTyp self.InputIdx = InputIdx self.DataLen = DataLen self.LookupRecord = Type + "LookupRecord" if Format == 1: Coverage = lambda r: r.Coverage ChainCoverage = lambda r: r.Coverage ContextData = lambda r: (None,) ChainContextData = lambda r: (None, None, None) SetContextData = None SetChainContextData = None RuleData = lambda r: (r.Input,) ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead) def SetRuleData(r, d): (r.Input,) = d (r.GlyphCount,) = (len(x) + 1 for x in d) def ChainSetRuleData(r, d): (r.Backtrack, r.Input, r.LookAhead) = d ( r.BacktrackGlyphCount, r.InputGlyphCount, r.LookAheadGlyphCount, ) = (len(d[0]), len(d[1]) + 1, len(d[2])) elif Format == 2: Coverage = lambda r: r.Coverage ChainCoverage = lambda r: r.Coverage ContextData = lambda r: (r.ClassDef,) ChainContextData = lambda r: ( r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef, ) def SetContextData(r, d): (r.ClassDef,) = d def SetChainContextData(r, d): (r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef) = d RuleData = lambda r: (r.Class,) ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead) def SetRuleData(r, d): (r.Class,) = d (r.GlyphCount,) = (len(x) + 1 for x in d) def ChainSetRuleData(r, d): (r.Backtrack, r.Input, r.LookAhead) = d ( r.BacktrackGlyphCount, r.InputGlyphCount, r.LookAheadGlyphCount, ) = (len(d[0]), len(d[1]) + 1, len(d[2])) elif Format == 3: Coverage = lambda r: r.Coverage[0] ChainCoverage = lambda r: r.InputCoverage[0] ContextData = None ChainContextData = None SetContextData = None SetChainContextData = None RuleData = lambda r: r.Coverage ChainRuleData = lambda r: ( r.BacktrackCoverage + r.InputCoverage + r.LookAheadCoverage ) def SetRuleData(r, d): (r.Coverage,) = d (r.GlyphCount,) = (len(x) for x in d) def ChainSetRuleData(r, d): (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d ( r.BacktrackGlyphCount, r.InputGlyphCount, r.LookAheadGlyphCount, ) = (len(x) for x in d) else: assert 0, "unknown format: %s" % Format if Chain: self.Coverage = ChainCoverage self.ContextData = ChainContextData self.SetContextData = SetChainContextData self.RuleData = ChainRuleData self.SetRuleData = ChainSetRuleData else: self.Coverage = Coverage self.ContextData = ContextData self.SetContextData = SetContextData self.RuleData = RuleData self.SetRuleData = SetRuleData if Format == 1: self.Rule = ChainTyp + "Rule" self.RuleCount = ChainTyp + "RuleCount" self.RuleSet = ChainTyp + "RuleSet" self.RuleSetCount = ChainTyp + "RuleSetCount" self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] elif Format == 2: self.Rule = ChainTyp + "ClassRule" self.RuleCount = ChainTyp + "ClassRuleCount" self.RuleSet = ChainTyp + "ClassSet" self.RuleSetCount = ChainTyp + "ClassSetCount" self.Intersect = lambda glyphs, c, r: ( c.intersect_class(glyphs, r) if c else (set(glyphs) if r == 0 else set()) ) self.ClassDef = "InputClassDef" if Chain else "ClassDef" self.ClassDefIndex = 1 if Chain else 0 self.Input = "Input" if Chain else "Class" elif Format == 3: self.Input = "InputCoverage" if Chain else "Coverage" if self.Format not in [1, 2, 3]: return None # Don't shoot the messenger; let it go if not hasattr(self.__class__, "_subset__ContextHelpers"): self.__class__._subset__ContextHelpers = {} if self.Format not in self.__class__._subset__ContextHelpers: helper = ContextHelper(self.__class__, self.Format) self.__class__._subset__ContextHelpers[self.Format] = helper return self.__class__._subset__ContextHelpers[self.Format] 
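# The ContextHelper instances built above are cached per (lookup class,
# Format) on the class itself, so Format-specific attribute names such as
# "SubRuleSet" vs. "ChainSubRuleSet" are resolved once and reused.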
@_add_method(otTables.ContextSubst, otTables.ChainContextSubst) def closure_glyphs(self, s, cur_glyphs): c = self.__subset_classify_context() indices = c.Coverage(self).intersect(cur_glyphs) if not indices: return [] cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs) if self.Format == 1: ContextData = c.ContextData(self) rss = getattr(self, c.RuleSet) rssCount = getattr(self, c.RuleSetCount) for i in indices: if i >= rssCount or not rss[i]: continue for r in getattr(rss[i], c.Rule): if not r: continue if not all( all(c.Intersect(s.glyphs, cd, k) for k in klist) for cd, klist in zip(ContextData, c.RuleData(r)) ): continue chaos = set() for ll in getattr(r, c.LookupRecord): if not ll: continue seqi = ll.SequenceIndex if seqi in chaos: # TODO Can we improve this? pos_glyphs = None else: if seqi == 0: pos_glyphs = frozenset([c.Coverage(self).glyphs[i]]) else: pos_glyphs = frozenset([r.Input[seqi - 1]]) lookup = s.table.LookupList.Lookup[ll.LookupListIndex] chaos.add(seqi) if lookup.may_have_non_1to1(): chaos.update(range(seqi, len(r.Input) + 2)) lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) elif self.Format == 2: ClassDef = getattr(self, c.ClassDef) indices = ClassDef.intersect(cur_glyphs) ContextData = c.ContextData(self) rss = getattr(self, c.RuleSet) rssCount = getattr(self, c.RuleSetCount) for i in indices: if i >= rssCount or not rss[i]: continue for r in getattr(rss[i], c.Rule): if not r: continue if not all( all(c.Intersect(s.glyphs, cd, k) for k in klist) for cd, klist in zip(ContextData, c.RuleData(r)) ): continue chaos = set() for ll in getattr(r, c.LookupRecord): if not ll: continue seqi = ll.SequenceIndex if seqi in chaos: # TODO Can we improve this? pos_glyphs = None else: if seqi == 0: pos_glyphs = frozenset( ClassDef.intersect_class(cur_glyphs, i) ) else: pos_glyphs = frozenset( ClassDef.intersect_class( s.glyphs, getattr(r, c.Input)[seqi - 1] ) ) lookup = s.table.LookupList.Lookup[ll.LookupListIndex] chaos.add(seqi) if lookup.may_have_non_1to1(): chaos.update(range(seqi, len(getattr(r, c.Input)) + 2)) lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) elif self.Format == 3: if not all(x is not None and x.intersect(s.glyphs) for x in c.RuleData(self)): return [] r = self input_coverages = getattr(r, c.Input) chaos = set() for ll in getattr(r, c.LookupRecord): if not ll: continue seqi = ll.SequenceIndex if seqi in chaos: # TODO Can we improve this? 
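                    # Once an earlier lookup record may have rewritten this
                    # sequence position, we can't know which glyphs occupy it;
                    # pos_glyphs = None makes closure_glyphs fall back to the
                    # full current glyph set.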
pos_glyphs = None else: if seqi == 0: pos_glyphs = frozenset(cur_glyphs) else: pos_glyphs = frozenset( input_coverages[seqi].intersect_glyphs(s.glyphs) ) lookup = s.table.LookupList.Lookup[ll.LookupListIndex] chaos.add(seqi) if lookup.may_have_non_1to1(): chaos.update(range(seqi, len(input_coverages) + 1)) lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) else: assert 0, "unknown format: %s" % self.Format @_add_method( otTables.ContextSubst, otTables.ContextPos, otTables.ChainContextSubst, otTables.ChainContextPos, ) def subset_glyphs(self, s): c = self.__subset_classify_context() if self.Format == 1: indices = self.Coverage.subset(s.glyphs) rss = getattr(self, c.RuleSet) rssCount = getattr(self, c.RuleSetCount) rss = [rss[i] for i in indices if i < rssCount] for rs in rss: if not rs: continue ss = getattr(rs, c.Rule) ss = [ r for r in ss if r and all(all(g in s.glyphs for g in glist) for glist in c.RuleData(r)) ] setattr(rs, c.Rule, ss) setattr(rs, c.RuleCount, len(ss)) # Prune empty rulesets indices = [i for i, rs in enumerate(rss) if rs and getattr(rs, c.Rule)] self.Coverage.remap(indices) rss = _list_subset(rss, indices) setattr(self, c.RuleSet, rss) setattr(self, c.RuleSetCount, len(rss)) return bool(rss) elif self.Format == 2: if not self.Coverage.subset(s.glyphs): return False ContextData = c.ContextData(self) klass_maps = [ x.subset(s.glyphs, remap=True) if x else None for x in ContextData ] # Keep rulesets for class numbers that survived. indices = klass_maps[c.ClassDefIndex] rss = getattr(self, c.RuleSet) rssCount = getattr(self, c.RuleSetCount) rss = [rss[i] for i in indices if i < rssCount] del rssCount # Delete, but not renumber, unreachable rulesets. indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs) rss = [rss if i in indices else None for i, rss in enumerate(rss)] for rs in rss: if not rs: continue ss = getattr(rs, c.Rule) ss = [ r for r in ss if r and all( all(k in klass_map for k in klist) for klass_map, klist in zip(klass_maps, c.RuleData(r)) ) ] setattr(rs, c.Rule, ss) setattr(rs, c.RuleCount, len(ss)) # Remap rule classes for r in ss: c.SetRuleData( r, [ [klass_map.index(k) for k in klist] for klass_map, klist in zip(klass_maps, c.RuleData(r)) ], ) # Prune empty rulesets rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss] while rss and rss[-1] is None: del rss[-1] setattr(self, c.RuleSet, rss) setattr(self, c.RuleSetCount, len(rss)) # TODO: We can do a second round of remapping class values based # on classes that are actually used in at least one rule. Right # now we subset classes to c.glyphs only. Or better, rewrite # the above to do that. 
return bool(rss) elif self.Format == 3: return all(x is not None and x.subset(s.glyphs) for x in c.RuleData(self)) else: assert 0, "unknown format: %s" % self.Format @_add_method( otTables.ContextSubst, otTables.ChainContextSubst, otTables.ContextPos, otTables.ChainContextPos, ) def subset_lookups(self, lookup_indices): c = self.__subset_classify_context() if self.Format in [1, 2]: for rs in getattr(self, c.RuleSet): if not rs: continue for r in getattr(rs, c.Rule): if not r: continue setattr( r, c.LookupRecord, [ ll for ll in getattr(r, c.LookupRecord) if ll and ll.LookupListIndex in lookup_indices ], ) for ll in getattr(r, c.LookupRecord): if not ll: continue ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) elif self.Format == 3: setattr( self, c.LookupRecord, [ ll for ll in getattr(self, c.LookupRecord) if ll and ll.LookupListIndex in lookup_indices ], ) for ll in getattr(self, c.LookupRecord): if not ll: continue ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) else: assert 0, "unknown format: %s" % self.Format @_add_method( otTables.ContextSubst, otTables.ChainContextSubst, otTables.ContextPos, otTables.ChainContextPos, ) def collect_lookups(self): c = self.__subset_classify_context() if self.Format in [1, 2]: return [ ll.LookupListIndex for rs in getattr(self, c.RuleSet) if rs for r in getattr(rs, c.Rule) if r for ll in getattr(r, c.LookupRecord) if ll ] elif self.Format == 3: return [ll.LookupListIndex for ll in getattr(self, c.LookupRecord) if ll] else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.ExtensionSubst) def closure_glyphs(self, s, cur_glyphs): if self.Format == 1: self.ExtSubTable.closure_glyphs(s, cur_glyphs) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.ExtensionSubst) def may_have_non_1to1(self): if self.Format == 1: return self.ExtSubTable.may_have_non_1to1() else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.ExtensionSubst, otTables.ExtensionPos) def subset_glyphs(self, s): if self.Format == 1: return self.ExtSubTable.subset_glyphs(s) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.ExtensionSubst, otTables.ExtensionPos) def prune_post_subset(self, font, options): if self.Format == 1: return self.ExtSubTable.prune_post_subset(font, options) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.ExtensionSubst, otTables.ExtensionPos) def subset_lookups(self, lookup_indices): if self.Format == 1: return self.ExtSubTable.subset_lookups(lookup_indices) else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.ExtensionSubst, otTables.ExtensionPos) def collect_lookups(self): if self.Format == 1: return self.ExtSubTable.collect_lookups() else: assert 0, "unknown format: %s" % self.Format @_add_method(otTables.Lookup) def closure_glyphs(self, s, cur_glyphs=None): if cur_glyphs is None: cur_glyphs = frozenset(s.glyphs) # Memoize key = id(self) doneLookups = s._doneLookups count, covered = doneLookups.get(key, (0, None)) if count != len(s.glyphs): count, covered = doneLookups[key] = (len(s.glyphs), set()) if cur_glyphs.issubset(covered): return covered.update(cur_glyphs) for st in self.SubTable: if not st: continue st.closure_glyphs(s, cur_glyphs) @_add_method(otTables.Lookup) def subset_glyphs(self, s): self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)] self.SubTableCount = len(self.SubTable) if hasattr(self, "MarkFilteringSet") and self.MarkFilteringSet is not None: if self.MarkFilteringSet not in 
s.used_mark_sets: self.MarkFilteringSet = None self.LookupFlag &= ~0x10 else: self.MarkFilteringSet = s.used_mark_sets.index(self.MarkFilteringSet) return bool(self.SubTableCount) @_add_method(otTables.Lookup) def prune_post_subset(self, font, options): ret = False for st in self.SubTable: if not st: continue if st.prune_post_subset(font, options): ret = True return ret @_add_method(otTables.Lookup) def subset_lookups(self, lookup_indices): for s in self.SubTable: s.subset_lookups(lookup_indices) @_add_method(otTables.Lookup) def collect_lookups(self): return sum((st.collect_lookups() for st in self.SubTable if st), []) @_add_method(otTables.Lookup) def may_have_non_1to1(self): return any(st.may_have_non_1to1() for st in self.SubTable if st) @_add_method(otTables.LookupList) def subset_glyphs(self, s): """Returns the indices of nonempty lookups.""" return [i for i, l in enumerate(self.Lookup) if l and l.subset_glyphs(s)] @_add_method(otTables.LookupList) def prune_post_subset(self, font, options): ret = False for l in self.Lookup: if not l: continue if l.prune_post_subset(font, options): ret = True return ret @_add_method(otTables.LookupList) def subset_lookups(self, lookup_indices): self.ensureDecompiled() self.Lookup = [self.Lookup[i] for i in lookup_indices if i < self.LookupCount] self.LookupCount = len(self.Lookup) for l in self.Lookup: l.subset_lookups(lookup_indices) @_add_method(otTables.LookupList) def neuter_lookups(self, lookup_indices): """Sets lookups not in lookup_indices to None.""" self.ensureDecompiled() self.Lookup = [ l if i in lookup_indices else None for i, l in enumerate(self.Lookup) ] @_add_method(otTables.LookupList) def closure_lookups(self, lookup_indices): """Returns sorted index of all lookups reachable from lookup_indices.""" lookup_indices = _uniq_sort(lookup_indices) recurse = lookup_indices while True: recurse_lookups = sum( (self.Lookup[i].collect_lookups() for i in recurse if i < self.LookupCount), [], ) recurse_lookups = [ l for l in recurse_lookups if l not in lookup_indices and l < self.LookupCount ] if not recurse_lookups: return _uniq_sort(lookup_indices) recurse_lookups = _uniq_sort(recurse_lookups) lookup_indices.extend(recurse_lookups) recurse = recurse_lookups @_add_method(otTables.Feature) def subset_lookups(self, lookup_indices): """ "Returns True if feature is non-empty afterwards.""" self.LookupListIndex = [l for l in self.LookupListIndex if l in lookup_indices] # Now map them. self.LookupListIndex = [lookup_indices.index(l) for l in self.LookupListIndex] self.LookupCount = len(self.LookupListIndex) # keep 'size' feature even if it contains no lookups; but drop any other # empty feature (e.g. FeatureParams for stylistic set names) # https://github.com/fonttools/fonttools/issues/2324 return self.LookupCount or isinstance( self.FeatureParams, otTables.FeatureParamsSize ) @_add_method(otTables.FeatureList) def subset_lookups(self, lookup_indices): """Returns the indices of nonempty features.""" # Note: Never ever drop feature 'pref', even if it's empty. # HarfBuzz chooses shaper for Khmer based on presence of this # feature. 
See thread at: # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html return [ i for i, f in enumerate(self.FeatureRecord) if (f.Feature.subset_lookups(lookup_indices) or f.FeatureTag == "pref") ] @_add_method(otTables.FeatureList) def collect_lookups(self, feature_indices): return sum( ( self.FeatureRecord[i].Feature.LookupListIndex for i in feature_indices if i < self.FeatureCount ), [], ) @_add_method(otTables.FeatureList) def subset_features(self, feature_indices): self.ensureDecompiled() self.FeatureRecord = _list_subset(self.FeatureRecord, feature_indices) self.FeatureCount = len(self.FeatureRecord) return bool(self.FeatureCount) @_add_method(otTables.FeatureTableSubstitution) def subset_lookups(self, lookup_indices): """Returns the indices of nonempty features.""" return [ r.FeatureIndex for r in self.SubstitutionRecord if r.Feature.subset_lookups(lookup_indices) ] @_add_method(otTables.FeatureVariations) def subset_lookups(self, lookup_indices): """Returns the indices of nonempty features.""" return sum( ( f.FeatureTableSubstitution.subset_lookups(lookup_indices) for f in self.FeatureVariationRecord ), [], ) @_add_method(otTables.FeatureVariations) def collect_lookups(self, feature_indices): return sum( ( r.Feature.LookupListIndex for vr in self.FeatureVariationRecord for r in vr.FeatureTableSubstitution.SubstitutionRecord if r.FeatureIndex in feature_indices ), [], ) @_add_method(otTables.FeatureTableSubstitution) def subset_features(self, feature_indices): self.ensureDecompiled() self.SubstitutionRecord = [ r for r in self.SubstitutionRecord if r.FeatureIndex in feature_indices ] # remap feature indices for r in self.SubstitutionRecord: r.FeatureIndex = feature_indices.index(r.FeatureIndex) self.SubstitutionCount = len(self.SubstitutionRecord) return bool(self.SubstitutionCount) @_add_method(otTables.FeatureVariations) def subset_features(self, feature_indices): self.ensureDecompiled() for r in self.FeatureVariationRecord: r.FeatureTableSubstitution.subset_features(feature_indices) # Prune empty records at the end only # https://github.com/fonttools/fonttools/issues/1881 while ( self.FeatureVariationRecord and not self.FeatureVariationRecord[ -1 ].FeatureTableSubstitution.SubstitutionCount ): self.FeatureVariationRecord.pop() self.FeatureVariationCount = len(self.FeatureVariationRecord) return bool(self.FeatureVariationCount) @_add_method(otTables.DefaultLangSys, otTables.LangSys) def subset_features(self, feature_indices): if self.ReqFeatureIndex in feature_indices: self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) else: self.ReqFeatureIndex = 65535 self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] # Now map them. 
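        # (two passes: the filter above drops features that were subsetted
        # away; the rebuild below renumbers survivors as positions within
        # the retained feature_indices list)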
self.FeatureIndex = [ feature_indices.index(f) for f in self.FeatureIndex if f in feature_indices ] self.FeatureCount = len(self.FeatureIndex) return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) @_add_method(otTables.DefaultLangSys, otTables.LangSys) def collect_features(self): feature_indices = self.FeatureIndex[:] if self.ReqFeatureIndex != 65535: feature_indices.append(self.ReqFeatureIndex) return _uniq_sort(feature_indices) @_add_method(otTables.Script) def subset_features(self, feature_indices, keepEmptyDefaultLangSys=False): if ( self.DefaultLangSys and not self.DefaultLangSys.subset_features(feature_indices) and not keepEmptyDefaultLangSys ): self.DefaultLangSys = None self.LangSysRecord = [ l for l in self.LangSysRecord if l.LangSys.subset_features(feature_indices) ] self.LangSysCount = len(self.LangSysRecord) return bool(self.LangSysCount or self.DefaultLangSys) @_add_method(otTables.Script) def collect_features(self): feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] if self.DefaultLangSys: feature_indices.append(self.DefaultLangSys.collect_features()) return _uniq_sort(sum(feature_indices, [])) @_add_method(otTables.ScriptList) def subset_features(self, feature_indices, retain_empty): # https://bugzilla.mozilla.org/show_bug.cgi?id=1331737#c32 self.ScriptRecord = [ s for s in self.ScriptRecord if s.Script.subset_features(feature_indices, s.ScriptTag == "DFLT") or retain_empty ] self.ScriptCount = len(self.ScriptRecord) return bool(self.ScriptCount) @_add_method(otTables.ScriptList) def collect_features(self): return _uniq_sort(sum((s.Script.collect_features() for s in self.ScriptRecord), [])) # CBLC will inherit it @_add_method(ttLib.getTableClass("EBLC")) def subset_glyphs(self, s): for strike in self.strikes: for indexSubTable in strike.indexSubTables: indexSubTable.names = [n for n in indexSubTable.names if n in s.glyphs] strike.indexSubTables = [i for i in strike.indexSubTables if i.names] self.strikes = [s for s in self.strikes if s.indexSubTables] return True # CBDT will inherit it @_add_method(ttLib.getTableClass("EBDT")) def subset_glyphs(self, s): strikeData = [ {g: strike[g] for g in s.glyphs if g in strike} for strike in self.strikeData ] # Prune empty strikes # https://github.com/fonttools/fonttools/issues/1633 self.strikeData = [strike for strike in strikeData if strike] return True @_add_method(ttLib.getTableClass("sbix")) def subset_glyphs(self, s): for strike in self.strikes.values(): strike.glyphs = {g: strike.glyphs[g] for g in s.glyphs if g in strike.glyphs} return True @_add_method(ttLib.getTableClass("GSUB")) def closure_glyphs(self, s): s.table = self.table if self.table.ScriptList: feature_indices = self.table.ScriptList.collect_features() else: feature_indices = [] if self.table.FeatureList: lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) else: lookup_indices = [] if getattr(self.table, "FeatureVariations", None): lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices) lookup_indices = _uniq_sort(lookup_indices) if self.table.LookupList: s._doneLookups = {} while True: orig_glyphs = frozenset(s.glyphs) for i in lookup_indices: if i >= self.table.LookupList.LookupCount: continue if not self.table.LookupList.Lookup[i]: continue self.table.LookupList.Lookup[i].closure_glyphs(s) if orig_glyphs == s.glyphs: break del s._doneLookups del s.table @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def subset_glyphs(self, s): s.glyphs = s.glyphs_gsubed if 
self.table.LookupList: lookup_indices = self.table.LookupList.subset_glyphs(s) else: lookup_indices = [] self.subset_lookups(lookup_indices) return True @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def retain_empty_scripts(self): # https://github.com/fonttools/fonttools/issues/518 # https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15 return self.__class__ == ttLib.getTableClass("GSUB") @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def subset_lookups(self, lookup_indices): """Retains specified lookups, then removes empty features, language systems, and scripts.""" if self.table.LookupList: self.table.LookupList.subset_lookups(lookup_indices) if self.table.FeatureList: feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) else: feature_indices = [] if getattr(self.table, "FeatureVariations", None): feature_indices += self.table.FeatureVariations.subset_lookups(lookup_indices) feature_indices = _uniq_sort(feature_indices) if self.table.FeatureList: self.table.FeatureList.subset_features(feature_indices) if getattr(self.table, "FeatureVariations", None): self.table.FeatureVariations.subset_features(feature_indices) if self.table.ScriptList: self.table.ScriptList.subset_features( feature_indices, self.retain_empty_scripts() ) @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def neuter_lookups(self, lookup_indices): """Sets lookups not in lookup_indices to None.""" if self.table.LookupList: self.table.LookupList.neuter_lookups(lookup_indices) @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def prune_lookups(self, remap=True): """Remove (default) or neuter unreferenced lookups""" if self.table.ScriptList: feature_indices = self.table.ScriptList.collect_features() else: feature_indices = [] if self.table.FeatureList: lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) else: lookup_indices = [] if getattr(self.table, "FeatureVariations", None): lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices) lookup_indices = _uniq_sort(lookup_indices) if self.table.LookupList: lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) else: lookup_indices = [] if remap: self.subset_lookups(lookup_indices) else: self.neuter_lookups(lookup_indices) @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def subset_feature_tags(self, feature_tags): if self.table.FeatureList: feature_indices = [ i for i, f in enumerate(self.table.FeatureList.FeatureRecord) if f.FeatureTag in feature_tags ] self.table.FeatureList.subset_features(feature_indices) if getattr(self.table, "FeatureVariations", None): self.table.FeatureVariations.subset_features(feature_indices) else: feature_indices = [] if self.table.ScriptList: self.table.ScriptList.subset_features( feature_indices, self.retain_empty_scripts() ) @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def subset_script_tags(self, tags): langsys = {} script_tags = set() for tag in tags: script_tag, lang_tag = tag.split(".") if "." 
in tag else (tag, "*") script_tags.add(script_tag.ljust(4)) langsys.setdefault(script_tag, set()).add(lang_tag.ljust(4)) if self.table.ScriptList: self.table.ScriptList.ScriptRecord = [ s for s in self.table.ScriptList.ScriptRecord if s.ScriptTag in script_tags ] self.table.ScriptList.ScriptCount = len(self.table.ScriptList.ScriptRecord) for record in self.table.ScriptList.ScriptRecord: if record.ScriptTag in langsys and "* " not in langsys[record.ScriptTag]: record.Script.LangSysRecord = [ l for l in record.Script.LangSysRecord if l.LangSysTag in langsys[record.ScriptTag] ] record.Script.LangSysCount = len(record.Script.LangSysRecord) if "dflt" not in langsys[record.ScriptTag]: record.Script.DefaultLangSys = None @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def prune_features(self): """Remove unreferenced features""" if self.table.ScriptList: feature_indices = self.table.ScriptList.collect_features() else: feature_indices = [] if self.table.FeatureList: self.table.FeatureList.subset_features(feature_indices) if getattr(self.table, "FeatureVariations", None): self.table.FeatureVariations.subset_features(feature_indices) if self.table.ScriptList: self.table.ScriptList.subset_features( feature_indices, self.retain_empty_scripts() ) @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def prune_pre_subset(self, font, options): # Drop undesired features if "*" not in options.layout_scripts: self.subset_script_tags(options.layout_scripts) if "*" not in options.layout_features: self.subset_feature_tags(options.layout_features) # Neuter unreferenced lookups self.prune_lookups(remap=False) return True @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def remove_redundant_langsys(self): table = self.table if not table.ScriptList or not table.FeatureList: return features = table.FeatureList.FeatureRecord for s in table.ScriptList.ScriptRecord: d = s.Script.DefaultLangSys if not d: continue for lr in s.Script.LangSysRecord[:]: l = lr.LangSys # Compare d and l if len(d.FeatureIndex) != len(l.FeatureIndex): continue if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): continue if d.ReqFeatureIndex != 65535: if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: continue for i in range(len(d.FeatureIndex)): if features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: break else: # LangSys and default are equal; delete LangSys s.Script.LangSysRecord.remove(lr) @_add_method(ttLib.getTableClass("GSUB"), ttLib.getTableClass("GPOS")) def prune_post_subset(self, font, options): table = self.table self.prune_lookups() # XXX Is this actually needed?! if table.LookupList: table.LookupList.prune_post_subset(font, options) # XXX Next two lines disabled because OTS is stupid and # doesn't like NULL offsets here. # if not table.LookupList.Lookup: # table.LookupList = None if not table.LookupList: table.FeatureList = None if table.FeatureList: self.remove_redundant_langsys() # Remove unreferenced features self.prune_features() # XXX Next two lines disabled because OTS is stupid and # doesn't like NULL offsets here. # if table.FeatureList and not table.FeatureList.FeatureRecord: # table.FeatureList = None # Never drop scripts themselves, as their mere availability # holds semantic significance. # XXX Next two lines disabled because OTS is stupid and # doesn't like NULL offsets here.
# if table.ScriptList and not table.ScriptList.ScriptRecord: # table.ScriptList = None if hasattr(table, "FeatureVariations"): # drop FeatureVariations if there are no features to substitute if table.FeatureVariations and not ( table.FeatureList and table.FeatureVariations.FeatureVariationRecord ): table.FeatureVariations = None # downgrade table version if there are no FeatureVariations if not table.FeatureVariations and table.Version == 0x00010001: table.Version = 0x00010000 return True @_add_method(ttLib.getTableClass("GDEF")) def subset_glyphs(self, s): glyphs = s.glyphs_gsubed table = self.table if table.LigCaretList: indices = table.LigCaretList.Coverage.subset(glyphs) table.LigCaretList.LigGlyph = _list_subset(table.LigCaretList.LigGlyph, indices) table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) if table.MarkAttachClassDef: table.MarkAttachClassDef.classDefs = { g: v for g, v in table.MarkAttachClassDef.classDefs.items() if g in glyphs } if table.GlyphClassDef: table.GlyphClassDef.classDefs = { g: v for g, v in table.GlyphClassDef.classDefs.items() if g in glyphs } if table.AttachList: indices = table.AttachList.Coverage.subset(glyphs) GlyphCount = table.AttachList.GlyphCount table.AttachList.AttachPoint = [ table.AttachList.AttachPoint[i] for i in indices if i < GlyphCount ] table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: markGlyphSets = table.MarkGlyphSetsDef for coverage in markGlyphSets.Coverage: if coverage: coverage.subset(glyphs) s.used_mark_sets = [i for i, c in enumerate(markGlyphSets.Coverage) if c.glyphs] markGlyphSets.Coverage = [c for c in markGlyphSets.Coverage if c.glyphs] return True def _pruneGDEF(font): if "GDEF" not in font: return gdef = font["GDEF"] table = gdef.table if not hasattr(table, "VarStore"): return store = table.VarStore usedVarIdxes = set() # Collect. table.collect_device_varidxes(usedVarIdxes) if "GPOS" in font: font["GPOS"].table.collect_device_varidxes(usedVarIdxes) # Subset. varidx_map = store.subset_varidxes(usedVarIdxes) # Map. 
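# (For illustration: subset_varidxes packs the VarStore and returns a
# varidx_map from old VarIdx to new VarIdx for every retained index; the
# remap_device_varidxes calls below then rewrite the VariationIndex
# device records in GDEF, and in GPOS if present, to match the new store.)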
table.remap_device_varidxes(varidx_map) if "GPOS" in font: font["GPOS"].table.remap_device_varidxes(varidx_map) @_add_method(ttLib.getTableClass("GDEF")) def prune_post_subset(self, font, options): table = self.table # XXX check these against OTS if table.LigCaretList and not table.LigCaretList.LigGlyphCount: table.LigCaretList = None if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: table.MarkAttachClassDef = None if table.GlyphClassDef and not table.GlyphClassDef.classDefs: table.GlyphClassDef = None if table.AttachList and not table.AttachList.GlyphCount: table.AttachList = None if hasattr(table, "VarStore"): _pruneGDEF(font) if table.VarStore.VarDataCount == 0: if table.Version == 0x00010003: table.Version = 0x00010002 if ( not hasattr(table, "MarkGlyphSetsDef") or not table.MarkGlyphSetsDef or not table.MarkGlyphSetsDef.Coverage ): table.MarkGlyphSetsDef = None if table.Version == 0x00010002: table.Version = 0x00010000 return bool( table.LigCaretList or table.MarkAttachClassDef or table.GlyphClassDef or table.AttachList or (table.Version >= 0x00010002 and table.MarkGlyphSetsDef) or (table.Version >= 0x00010003 and table.VarStore) ) @_add_method(ttLib.getTableClass("kern")) def prune_pre_subset(self, font, options): # Prune unknown kern table types self.kernTables = [t for t in self.kernTables if hasattr(t, "kernTable")] return bool(self.kernTables) @_add_method(ttLib.getTableClass("kern")) def subset_glyphs(self, s): glyphs = s.glyphs_gsubed for t in self.kernTables: t.kernTable = { (a, b): v for (a, b), v in t.kernTable.items() if a in glyphs and b in glyphs } self.kernTables = [t for t in self.kernTables if t.kernTable] return bool(self.kernTables) @_add_method(ttLib.getTableClass("vmtx")) def subset_glyphs(self, s): self.metrics = _dict_subset(self.metrics, s.glyphs) for g in s.glyphs_emptied: self.metrics[g] = (0, 0) return bool(self.metrics) @_add_method(ttLib.getTableClass("hmtx")) def subset_glyphs(self, s): self.metrics = _dict_subset(self.metrics, s.glyphs) for g in s.glyphs_emptied: self.metrics[g] = (0, 0) return True # Required table @_add_method(ttLib.getTableClass("hdmx")) def subset_glyphs(self, s): self.hdmx = {sz: _dict_subset(l, s.glyphs) for sz, l in self.hdmx.items()} for sz in self.hdmx: for g in s.glyphs_emptied: self.hdmx[sz][g] = 0 return bool(self.hdmx) @_add_method(ttLib.getTableClass("ankr")) def subset_glyphs(self, s): table = self.table.AnchorPoints assert table.Format == 0, "unknown 'ankr' format %s" % table.Format table.Anchors = { glyph: table.Anchors[glyph] for glyph in s.glyphs if glyph in table.Anchors } return len(table.Anchors) > 0 @_add_method(ttLib.getTableClass("bsln")) def closure_glyphs(self, s): table = self.table.Baseline if table.Format in (2, 3): s.glyphs.add(table.StandardGlyph) @_add_method(ttLib.getTableClass("bsln")) def subset_glyphs(self, s): table = self.table.Baseline if table.Format in (1, 3): baselines = { glyph: table.BaselineValues.get(glyph, table.DefaultBaseline) for glyph in s.glyphs } if len(baselines) > 0: mostCommon, _cnt = Counter(baselines.values()).most_common(1)[0] table.DefaultBaseline = mostCommon baselines = {glyph: b for glyph, b in baselines.items() if b != mostCommon} if len(baselines) > 0: table.BaselineValues = baselines else: table.Format = {1: 0, 3: 2}[table.Format] del table.BaselineValues return True @_add_method(ttLib.getTableClass("lcar")) def subset_glyphs(self, s): table = self.table.LigatureCarets if table.Format in (0, 1): table.Carets = { glyph: table.Carets[glyph] for glyph in 
s.glyphs if glyph in table.Carets } return len(table.Carets) > 0 else: assert False, "unknown 'lcar' format %s" % table.Format @_add_method(ttLib.getTableClass("gvar")) def prune_pre_subset(self, font, options): if options.notdef_glyph and not options.notdef_outline: self.variations[font.glyphOrder[0]] = [] return True @_add_method(ttLib.getTableClass("gvar")) def subset_glyphs(self, s): self.variations = _dict_subset(self.variations, s.glyphs) self.glyphCount = len(self.variations) return bool(self.variations) def _remap_index_map(s, varidx_map, table_map): map_ = {k: varidx_map[v] for k, v in table_map.mapping.items()} # Emptied glyphs are remapped to: # if GID <= last retained GID, 0/0: delta set for 0/0 is expected to exist & zeros compress well # if GID > last retained GID, major/minor of the last retained glyph: will be optimized out by table compiler last_idx = varidx_map[table_map.mapping[s.last_retained_glyph]] for g, i in s.reverseEmptiedGlyphMap.items(): map_[g] = last_idx if i > s.last_retained_order else 0 return map_ @_add_method(ttLib.getTableClass("HVAR")) def subset_glyphs(self, s): table = self.table used = set() advIdxes_ = set() retainAdvMap = False if table.AdvWidthMap: table.AdvWidthMap.mapping = _dict_subset(table.AdvWidthMap.mapping, s.glyphs) used.update(table.AdvWidthMap.mapping.values()) else: used.update(s.reverseOrigGlyphMap.values()) advIdxes_ = used.copy() retainAdvMap = s.options.retain_gids if table.LsbMap: table.LsbMap.mapping = _dict_subset(table.LsbMap.mapping, s.glyphs) used.update(table.LsbMap.mapping.values()) if table.RsbMap: table.RsbMap.mapping = _dict_subset(table.RsbMap.mapping, s.glyphs) used.update(table.RsbMap.mapping.values()) varidx_map = table.VarStore.subset_varidxes( used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_ ) if table.AdvWidthMap: table.AdvWidthMap.mapping = _remap_index_map(s, varidx_map, table.AdvWidthMap) if table.LsbMap: table.LsbMap.mapping = _remap_index_map(s, varidx_map, table.LsbMap) if table.RsbMap: table.RsbMap.mapping = _remap_index_map(s, varidx_map, table.RsbMap) # TODO Return emptiness... return True @_add_method(ttLib.getTableClass("VVAR")) def subset_glyphs(self, s): table = self.table used = set() advIdxes_ = set() retainAdvMap = False if table.AdvHeightMap: table.AdvHeightMap.mapping = _dict_subset(table.AdvHeightMap.mapping, s.glyphs) used.update(table.AdvHeightMap.mapping.values()) else: used.update(s.reverseOrigGlyphMap.values()) advIdxes_ = used.copy() retainAdvMap = s.options.retain_gids if table.TsbMap: table.TsbMap.mapping = _dict_subset(table.TsbMap.mapping, s.glyphs) used.update(table.TsbMap.mapping.values()) if table.BsbMap: table.BsbMap.mapping = _dict_subset(table.BsbMap.mapping, s.glyphs) used.update(table.BsbMap.mapping.values()) if table.VOrgMap: table.VOrgMap.mapping = _dict_subset(table.VOrgMap.mapping, s.glyphs) used.update(table.VOrgMap.mapping.values()) varidx_map = table.VarStore.subset_varidxes( used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_ ) if table.AdvHeightMap: table.AdvHeightMap.mapping = _remap_index_map(s, varidx_map, table.AdvHeightMap) if table.TsbMap: table.TsbMap.mapping = _remap_index_map(s, varidx_map, table.TsbMap) if table.BsbMap: table.BsbMap.mapping = _remap_index_map(s, varidx_map, table.BsbMap) if table.VOrgMap: table.VOrgMap.mapping = _remap_index_map(s, varidx_map, table.VOrgMap) # TODO Return emptiness... 
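# (Design note, as far as the code above shows: when there is no advance
# mapping subtable, advance deltas are looked up implicitly by glyph ID,
# so under retain_gids the first-major mapping must keep its row order;
# that is what retainFirstMap and advIdxes request from subset_varidxes.)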
return True @_add_method(ttLib.getTableClass("VORG")) def subset_glyphs(self, s): self.VOriginRecords = { g: v for g, v in self.VOriginRecords.items() if g in s.glyphs } self.numVertOriginYMetrics = len(self.VOriginRecords) return True # Never drop; has default metrics @_add_method(ttLib.getTableClass("opbd")) def subset_glyphs(self, s): table = self.table.OpticalBounds if table.Format == 0: table.OpticalBoundsDeltas = { glyph: table.OpticalBoundsDeltas[glyph] for glyph in s.glyphs if glyph in table.OpticalBoundsDeltas } return len(table.OpticalBoundsDeltas) > 0 elif table.Format == 1: table.OpticalBoundsPoints = { glyph: table.OpticalBoundsPoints[glyph] for glyph in s.glyphs if glyph in table.OpticalBoundsPoints } return len(table.OpticalBoundsPoints) > 0 else: assert False, "unknown 'opbd' format %s" % table.Format @_add_method(ttLib.getTableClass("post")) def prune_pre_subset(self, font, options): if not options.glyph_names: self.formatType = 3.0 return True # Required table @_add_method(ttLib.getTableClass("post")) def subset_glyphs(self, s): self.extraNames = [] # This seems to do it return True # Required table @_add_method(ttLib.getTableClass("prop")) def subset_glyphs(self, s): prop = self.table.GlyphProperties if prop.Format == 0: return prop.DefaultProperties != 0 elif prop.Format == 1: prop.Properties = { g: prop.Properties.get(g, prop.DefaultProperties) for g in s.glyphs } mostCommon, _cnt = Counter(prop.Properties.values()).most_common(1)[0] prop.DefaultProperties = mostCommon prop.Properties = { g: prop for g, prop in prop.Properties.items() if prop != mostCommon } if len(prop.Properties) == 0: del prop.Properties prop.Format = 0 return prop.DefaultProperties != 0 return True else: assert False, "unknown 'prop' format %s" % prop.Format def _paint_glyph_names(paint, colr): result = set() def callback(paint): if paint.Format in { otTables.PaintFormat.PaintGlyph, otTables.PaintFormat.PaintColrGlyph, }: result.add(paint.Glyph) paint.traverse(colr, callback) return result @_add_method(ttLib.getTableClass("COLR")) def closure_glyphs(self, s): if self.version > 0: # on decompiling COLRv1, we only keep around the raw otTables # but for subsetting we need dicts with fully decompiled layers; # we store them temporarily in the C_O_L_R_ instance and delete # them after we have finished subsetting. self.ColorLayers = self._decompileColorLayersV0(self.table) self.ColorLayersV1 = { rec.BaseGlyph: rec.Paint for rec in self.table.BaseGlyphList.BaseGlyphPaintRecord } decompose = s.glyphs while decompose: layers = set() for g in decompose: for layer in self.ColorLayers.get(g, []): layers.add(layer.name) if self.version > 0: paint = self.ColorLayersV1.get(g) if paint is not None: layers.update(_paint_glyph_names(paint, self.table)) layers -= s.glyphs s.glyphs.update(layers) decompose = layers @_add_method(ttLib.getTableClass("COLR")) def subset_glyphs(self, s): from fontTools.colorLib.unbuilder import unbuildColrV1 from fontTools.colorLib.builder import buildColrV1, populateCOLRv0 # only include glyphs after COLR closure, which in turn comes after cmap and GSUB # closure, but importantly before glyf/CFF closures. COLR layers can refer to # composite glyphs, and that's ok, since glyf/CFF closures happen after COLR closure # and take care of those. 
If we also included glyphs resulting from glyf/CFF closures # when deciding which COLR base glyphs to retain, then we may end up with a situation # whereby a COLR base glyph is kept, not because it was directly requested (cmap) # or substituted (GSUB) or referenced by another COLRv1 PaintColrGlyph, but because # it corresponds to (has same GID as) a non-COLR glyph that happens to be used as a # component in the glyf or CFF table. Best case, we retain more glyphs than # required; worst case, we retain incomplete COLR records that try to reference # glyphs that are no longer in the final subset font. # https://github.com/fonttools/fonttools/issues/2461 s.glyphs = s.glyphs_colred self.ColorLayers = { g: self.ColorLayers[g] for g in s.glyphs if g in self.ColorLayers } if self.version == 0: return bool(self.ColorLayers) colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList) self.table.LayerList, self.table.BaseGlyphList = buildColrV1( {g: colorGlyphsV1[g] for g in colorGlyphsV1 if g in s.glyphs} ) del self.ColorLayersV1 if self.table.ClipList is not None: clips = self.table.ClipList.clips self.table.ClipList.clips = {g: clips[g] for g in clips if g in s.glyphs} layersV0 = self.ColorLayers if not self.table.BaseGlyphList.BaseGlyphPaintRecord: # no more COLRv1 glyphs: downgrade to version 0 self.version = 0 del self.table return bool(layersV0) populateCOLRv0( self.table, {g: [(layer.name, layer.colorID) for layer in layersV0[g]] for g in layersV0}, ) del self.ColorLayers # TODO: also prune unused varIndices in COLR.VarStore return True @_add_method(ttLib.getTableClass("CPAL")) def prune_post_subset(self, font, options): # Keep whole "CPAL" if "SVG " is present as it may be referenced by the latter # via 'var(--color{palette_entry_index}, ...)' CSS color variables. # For now we just assume this is the case from the mere presence of an "SVG " table, # since parsing the SVG to collect all the used indices is too much work... # TODO(anthrotype): Do The Right Thing (TM). if "SVG " in font: return True colr = font.get("COLR") if not colr: # drop CPAL if COLR was subsetted to empty return False colors_by_index = defaultdict(list) def collect_colors_by_index(paint): if hasattr(paint, "PaletteIndex"): # either solid colors... colors_by_index[paint.PaletteIndex].append(paint) elif hasattr(paint, "ColorLine"): # ...
or gradient color stops for stop in paint.ColorLine.ColorStop: colors_by_index[stop.PaletteIndex].append(stop) if colr.version == 0: for layers in colr.ColorLayers.values(): for layer in layers: colors_by_index[layer.colorID].append(layer) else: if colr.table.LayerRecordArray: for layer in colr.table.LayerRecordArray.LayerRecord: colors_by_index[layer.PaletteIndex].append(layer) for record in colr.table.BaseGlyphList.BaseGlyphPaintRecord: record.Paint.traverse(colr.table, collect_colors_by_index) # don't remap palette entry index 0xFFFF, this is always the foreground color # https://github.com/fonttools/fonttools/issues/2257 retained_palette_indices = set(colors_by_index.keys()) - {0xFFFF} for palette in self.palettes: palette[:] = [c for i, c in enumerate(palette) if i in retained_palette_indices] assert len(palette) == len(retained_palette_indices) for new_index, old_index in enumerate(sorted(retained_palette_indices)): for record in colors_by_index[old_index]: if hasattr(record, "colorID"): # v0 record.colorID = new_index elif hasattr(record, "PaletteIndex"): # v1 record.PaletteIndex = new_index else: raise AssertionError(record) self.numPaletteEntries = len(self.palettes[0]) if self.version == 1: kept_labels = [] for i, label in enumerate(self.paletteEntryLabels): if i in retained_palette_indices: kept_labels.append(label) self.paletteEntryLabels = kept_labels return bool(self.numPaletteEntries) @_add_method(otTables.MathGlyphConstruction) def closure_glyphs(self, glyphs): variants = set() for v in self.MathGlyphVariantRecord: variants.add(v.VariantGlyph) if self.GlyphAssembly: for p in self.GlyphAssembly.PartRecords: variants.add(p.glyph) return variants @_add_method(otTables.MathVariants) def closure_glyphs(self, s): glyphs = frozenset(s.glyphs) variants = set() if self.VertGlyphCoverage: indices = self.VertGlyphCoverage.intersect(glyphs) for i in indices: variants.update(self.VertGlyphConstruction[i].closure_glyphs(glyphs)) if self.HorizGlyphCoverage: indices = self.HorizGlyphCoverage.intersect(glyphs) for i in indices: variants.update(self.HorizGlyphConstruction[i].closure_glyphs(glyphs)) s.glyphs.update(variants) @_add_method(ttLib.getTableClass("MATH")) def closure_glyphs(self, s): if self.table.MathVariants: self.table.MathVariants.closure_glyphs(s) @_add_method(otTables.MathItalicsCorrectionInfo) def subset_glyphs(self, s): indices = self.Coverage.subset(s.glyphs) self.ItalicsCorrection = _list_subset(self.ItalicsCorrection, indices) self.ItalicsCorrectionCount = len(self.ItalicsCorrection) return bool(self.ItalicsCorrectionCount) @_add_method(otTables.MathTopAccentAttachment) def subset_glyphs(self, s): indices = self.TopAccentCoverage.subset(s.glyphs) self.TopAccentAttachment = _list_subset(self.TopAccentAttachment, indices) self.TopAccentAttachmentCount = len(self.TopAccentAttachment) return bool(self.TopAccentAttachmentCount) @_add_method(otTables.MathKernInfo) def subset_glyphs(self, s): indices = self.MathKernCoverage.subset(s.glyphs) self.MathKernInfoRecords = _list_subset(self.MathKernInfoRecords, indices) self.MathKernCount = len(self.MathKernInfoRecords) return bool(self.MathKernCount) @_add_method(otTables.MathGlyphInfo) def subset_glyphs(self, s): if self.MathItalicsCorrectionInfo: self.MathItalicsCorrectionInfo.subset_glyphs(s) if self.MathTopAccentAttachment: self.MathTopAccentAttachment.subset_glyphs(s) if self.MathKernInfo: self.MathKernInfo.subset_glyphs(s) if self.ExtendedShapeCoverage: self.ExtendedShapeCoverage.subset(s.glyphs) return True 
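# The MATH subsetters above and below all follow the same Coverage-driven
# pattern: Coverage.subset(glyphs) prunes the coverage in place and
# returns the indices of the retained glyphs, which are then used to trim
# the parallel record array. A minimal sketch of the pattern, with
# hypothetical Records/RecordCount names:
#
#     indices = self.Coverage.subset(s.glyphs)  # e.g. [0, 2] out of 3
#     self.Records = _list_subset(self.Records, indices)
#     self.RecordCount = len(self.Records)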
@_add_method(otTables.MathVariants) def subset_glyphs(self, s): if self.VertGlyphCoverage: indices = self.VertGlyphCoverage.subset(s.glyphs) self.VertGlyphConstruction = _list_subset(self.VertGlyphConstruction, indices) self.VertGlyphCount = len(self.VertGlyphConstruction) if self.HorizGlyphCoverage: indices = self.HorizGlyphCoverage.subset(s.glyphs) self.HorizGlyphConstruction = _list_subset(self.HorizGlyphConstruction, indices) self.HorizGlyphCount = len(self.HorizGlyphConstruction) return True @_add_method(ttLib.getTableClass("MATH")) def subset_glyphs(self, s): s.glyphs = s.glyphs_mathed if self.table.MathGlyphInfo: self.table.MathGlyphInfo.subset_glyphs(s) if self.table.MathVariants: self.table.MathVariants.subset_glyphs(s) return True @_add_method(ttLib.getTableModule("glyf").Glyph) def remapComponentsFast(self, glyphidmap): if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: return # Not composite data = self.data = bytearray(self.data) i = 10 more = 1 while more: flags = (data[i] << 8) | data[i + 1] glyphID = (data[i + 2] << 8) | data[i + 3] # Remap glyphID = glyphidmap[glyphID] data[i + 2] = glyphID >> 8 data[i + 3] = glyphID & 0xFF i += 4 flags = int(flags) if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS else: i += 2 if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO more = flags & 0x0020 # MORE_COMPONENTS @_add_method(ttLib.getTableClass("glyf")) def closure_glyphs(self, s): glyphSet = self.glyphs decompose = s.glyphs while decompose: components = set() for g in decompose: if g not in glyphSet: continue gl = glyphSet[g] for c in gl.getComponentNames(self): components.add(c) components -= s.glyphs s.glyphs.update(components) decompose = components @_add_method(ttLib.getTableClass("glyf")) def prune_pre_subset(self, font, options): if options.notdef_glyph and not options.notdef_outline: g = self[self.glyphOrder[0]] # Yay, easy! g.__dict__.clear() g.data = b"" return True @_add_method(ttLib.getTableClass("glyf")) def subset_glyphs(self, s): self.glyphs = _dict_subset(self.glyphs, s.glyphs) if not s.options.retain_gids: indices = [i for i, g in enumerate(self.glyphOrder) if g in s.glyphs] glyphmap = {o: n for n, o in enumerate(indices)} for v in self.glyphs.values(): if hasattr(v, "data"): v.remapComponentsFast(glyphmap) Glyph = ttLib.getTableModule("glyf").Glyph for g in s.glyphs_emptied: self.glyphs[g] = Glyph() self.glyphs[g].data = b"" self.glyphOrder = [ g for g in self.glyphOrder if g in s.glyphs or g in s.glyphs_emptied ] # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. 
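# (Worked example: if the retained old GIDs are [0, 3, 5], glyphmap above
# is {0: 0, 3: 1, 5: 2}; remapComponentsFast then patches the component
# GIDs directly in the raw composite glyph data, avoiding a full
# decompile/compile round trip.)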
return True @_add_method(ttLib.getTableClass("glyf")) def prune_post_subset(self, font, options): remove_hinting = not options.hinting for v in self.glyphs.values(): v.trim(remove_hinting=remove_hinting) return True @_add_method(ttLib.getTableClass("cmap")) def closure_glyphs(self, s): tables = [t for t in self.tables if t.isUnicode()] # Close glyphs for table in tables: if table.format == 14: for cmap in table.uvsDict.values(): glyphs = {g for u, g in cmap if u in s.unicodes_requested} if None in glyphs: glyphs.remove(None) s.glyphs.update(glyphs) else: cmap = table.cmap intersection = s.unicodes_requested.intersection(cmap.keys()) s.glyphs.update(cmap[u] for u in intersection) # Calculate unicodes_missing s.unicodes_missing = s.unicodes_requested.copy() for table in tables: s.unicodes_missing.difference_update(table.cmap) @_add_method(ttLib.getTableClass("cmap")) def prune_pre_subset(self, font, options): if not options.legacy_cmap: # Drop non-Unicode / non-Symbol cmaps self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] if not options.symbol_cmap: self.tables = [t for t in self.tables if not t.isSymbol()] # TODO(behdad) Only keep one subtable? # For now, drop format=0 which can't be subset_glyphs easily? self.tables = [t for t in self.tables if t.format != 0] self.numSubTables = len(self.tables) return True # Required table @_add_method(ttLib.getTableClass("cmap")) def subset_glyphs(self, s): s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only tables_format12_bmp = [] table_plat0_enc3 = {} # Unicode platform, Unicode BMP only, keyed by language table_plat3_enc1 = {} # Windows platform, Unicode BMP, keyed by language for t in self.tables: if t.platformID == 0 and t.platEncID == 3: table_plat0_enc3[t.language] = t if t.platformID == 3 and t.platEncID == 1: table_plat3_enc1[t.language] = t if t.format == 14: # TODO(behdad) We drop all the default-UVS mappings # for glyphs_requested. So it's the caller's responsibility to make # sure those are included. t.uvsDict = { v: [ (u, g) for u, g in l if g in s.glyphs_requested or u in s.unicodes_requested ] for v, l in t.uvsDict.items() } t.uvsDict = {v: l for v, l in t.uvsDict.items() if l} elif t.isUnicode(): t.cmap = { u: g for u, g in t.cmap.items() if g in s.glyphs_requested or u in s.unicodes_requested } # Collect format 12 tables that hold only basic multilingual plane # codepoints. if t.format == 12 and t.cmap and max(t.cmap.keys()) < 0x10000: tables_format12_bmp.append(t) else: t.cmap = {u: g for u, g in t.cmap.items() if g in s.glyphs_requested} # Format 12 tables are redundant if they contain just the same BMP codepoints # that their little BMP-only encoding siblings contain. for t in tables_format12_bmp: if ( t.platformID == 0 # Unicode platform and t.platEncID == 4 # Unicode full repertoire and t.language in table_plat0_enc3 # Have a BMP-only sibling? and table_plat0_enc3[t.language].cmap == t.cmap ): t.cmap.clear() elif ( t.platformID == 3 # Windows platform and t.platEncID == 10 # Unicode full repertoire and t.language in table_plat3_enc1 # Have a BMP-only sibling? and table_plat3_enc1[t.language].cmap == t.cmap ): t.cmap.clear() self.tables = [t for t in self.tables if (t.cmap if t.format != 14 else t.uvsDict)] self.numSubTables = len(self.tables) # TODO(behdad) Convert formats when needed. # In particular, if we have a format=12 without non-BMP # characters, convert it to format=4 if there's not one.
return True # Required table @_add_method(ttLib.getTableClass("DSIG")) def prune_pre_subset(self, font, options): # Drop all signatures since they will be invalid self.usNumSigs = 0 self.signatureRecords = [] return True @_add_method(ttLib.getTableClass("maxp")) def prune_pre_subset(self, font, options): if not options.hinting: if self.tableVersion == 0x00010000: self.maxZones = 1 self.maxTwilightPoints = 0 self.maxStorage = 0 self.maxFunctionDefs = 0 self.maxInstructionDefs = 0 self.maxStackElements = 0 self.maxSizeOfInstructions = 0 return True @_add_method(ttLib.getTableClass("name")) def prune_post_subset(self, font, options): visitor = NameRecordVisitor() visitor.visit(font) nameIDs = set(options.name_IDs) | visitor.seen if "*" not in options.name_IDs: self.names = [n for n in self.names if n.nameID in nameIDs] if not options.name_legacy: # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman # entry for Latin and no Unicode names. self.names = [n for n in self.names if n.isUnicode()] # TODO(behdad) Option to keep only one platform's if "*" not in options.name_languages: # TODO(behdad) This is Windows-platform specific! self.names = [n for n in self.names if n.langID in options.name_languages] if options.obfuscate_names: namerecs = [] for n in self.names: if n.nameID in [1, 4]: n.string = ".\x7f".encode("utf_16_be") if n.isUnicode() else ".\x7f" elif n.nameID in [2, 6]: n.string = "\x7f".encode("utf_16_be") if n.isUnicode() else "\x7f" elif n.nameID == 3: n.string = "" elif n.nameID in [16, 17, 18]: continue namerecs.append(n) self.names = namerecs return True # Required table @_add_method(ttLib.getTableClass("head")) def prune_post_subset(self, font, options): # Force re-compiling head table, to update any recalculated values. return True # TODO(behdad) OS/2 ulCodePageRange? # TODO(behdad) Drop AAT tables. # TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. # TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left # TODO(behdad) Drop GDEF subitems if unused by lookups # TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) # TODO(behdad) Text direction considerations. # TODO(behdad) Text script / language considerations. # TODO(behdad) Optionally drop 'kern' table if GPOS available # TODO(behdad) Implement --unicode='*' to choose all cmap'ed # TODO(behdad) Drop old-spec Indic scripts class Options(object): class OptionError(Exception): pass class UnknownOptionError(OptionError): pass # spaces in tag names (e.g. 
"SVG ", "cvt ") are stripped by the argument parser _drop_tables_default = [ "BASE", "JSTF", "DSIG", "EBDT", "EBLC", "EBSC", "PCLT", "LTSH", ] _drop_tables_default += ["Feat", "Glat", "Gloc", "Silf", "Sill"] # Graphite _no_subset_tables_default = [ "avar", "fvar", "gasp", "head", "hhea", "maxp", "vhea", "OS/2", "loca", "name", "cvt", "fpgm", "prep", "VDMX", "DSIG", "CPAL", "MVAR", "cvar", "STAT", ] _hinting_tables_default = ["cvt", "cvar", "fpgm", "prep", "hdmx", "VDMX"] # Based on HarfBuzz shapers _layout_features_groups = { # Default shaper "common": ["rvrn", "ccmp", "liga", "locl", "mark", "mkmk", "rlig"], "fractions": ["frac", "numr", "dnom"], "horizontal": ["calt", "clig", "curs", "kern", "rclt"], "vertical": ["valt", "vert", "vkrn", "vpal", "vrt2"], "ltr": ["ltra", "ltrm"], "rtl": ["rtla", "rtlm"], "rand": ["rand"], "justify": ["jalt"], "private": ["Harf", "HARF", "Buzz", "BUZZ"], "east_asian_spacing": ["chws", "vchw", "halt", "vhal"], # Complex shapers "arabic": [ "init", "medi", "fina", "isol", "med2", "fin2", "fin3", "cswh", "mset", "stch", ], "hangul": ["ljmo", "vjmo", "tjmo"], "tibetan": ["abvs", "blws", "abvm", "blwm"], "indic": [ "nukt", "akhn", "rphf", "rkrf", "pref", "blwf", "half", "abvf", "pstf", "cfar", "vatu", "cjct", "init", "pres", "abvs", "blws", "psts", "haln", "dist", "abvm", "blwm", ], } _layout_features_default = _uniq_sort( sum(iter(_layout_features_groups.values()), []) ) def __init__(self, **kwargs): self.drop_tables = self._drop_tables_default[:] self.no_subset_tables = self._no_subset_tables_default[:] self.passthrough_tables = False # keep/drop tables we can't subset self.hinting_tables = self._hinting_tables_default[:] self.legacy_kern = False # drop 'kern' table if GPOS available self.layout_closure = True self.layout_features = self._layout_features_default[:] self.layout_scripts = ["*"] self.ignore_missing_glyphs = False self.ignore_missing_unicodes = True self.hinting = True self.glyph_names = False self.legacy_cmap = False self.symbol_cmap = False self.name_IDs = [ 0, 1, 2, 3, 4, 5, 6, ] # https://github.com/fonttools/fonttools/issues/1170#issuecomment-364631225 self.name_legacy = False self.name_languages = [0x0409] # English self.obfuscate_names = False # to make webfont unusable as a system font self.retain_gids = False self.notdef_glyph = True # gid0 for TrueType / .notdef for CFF self.notdef_outline = False # No need for notdef to have an outline really self.recommended_glyphs = False # gid1, gid2, gid3 for TrueType self.recalc_bounds = False # Recalculate font bounding boxes self.recalc_timestamp = False # Recalculate font modified timestamp self.prune_unicode_ranges = True # Clear unused 'ulUnicodeRange' bits self.prune_codepage_ranges = True # Clear unused 'ulCodePageRange' bits self.recalc_average_width = False # update 'xAvgCharWidth' self.recalc_max_context = False # update 'usMaxContext' self.canonical_order = None # Order tables as recommended self.flavor = None # May be 'woff' or 'woff2' self.with_zopfli = False # use zopfli instead of zlib for WOFF 1.0 self.desubroutinize = False # Desubroutinize CFF CharStrings self.harfbuzz_repacker = USE_HARFBUZZ_REPACKER.default self.verbose = False self.timing = False self.xml = False self.font_number = -1 self.pretty_svg = False self.lazy = True self.set(**kwargs) def set(self, **kwargs): for k, v in kwargs.items(): if not hasattr(self, k): raise self.UnknownOptionError("Unknown option '%s'" % k) setattr(self, k, v) def parse_opts(self, argv, ignore_unknown=[]): posargs = [] passthru_options = [] 
for a in argv: orig_a = a if not a.startswith("--"): posargs.append(a) continue a = a[2:] i = a.find("=") op = "=" if i == -1: if a.startswith("no-"): k = a[3:] if k == "canonical-order": # reorderTables=None is faster than False (the latter # still reorders to "keep" the original table order) v = None else: v = False else: k = a v = True if k.endswith("?"): k = k[:-1] v = "?" else: k = a[:i] if k[-1] in "-+": op = k[-1] + "=" # Op is '-=' or '+=' now. k = k[:-1] v = a[i + 1 :] ok = k k = k.replace("-", "_") if not hasattr(self, k): if ignore_unknown is True or ok in ignore_unknown: passthru_options.append(orig_a) continue else: raise self.UnknownOptionError("Unknown option '%s'" % a) ov = getattr(self, k) if v == "?": print("Current setting for '%s' is: %s" % (ok, ov)) continue if isinstance(ov, bool): v = bool(v) elif isinstance(ov, int): v = int(v) elif isinstance(ov, str): v = str(v) # redundant elif isinstance(ov, list): if isinstance(v, bool): raise self.OptionError( "Option '%s' requires values to be specified using '='" % a ) vv = v.replace(",", " ").split() if vv == [""]: vv = [] vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] if op == "=": v = vv elif op == "+=": v = ov v.extend(vv) elif op == "-=": v = ov for x in vv: if x in v: v.remove(x) else: assert False setattr(self, k, v) return posargs + passthru_options class Subsetter(object): class SubsettingError(Exception): pass class MissingGlyphsSubsettingError(SubsettingError): pass class MissingUnicodesSubsettingError(SubsettingError): pass def __init__(self, options=None): if not options: options = Options() self.options = options self.unicodes_requested = set() self.glyph_names_requested = set() self.glyph_ids_requested = set() def populate(self, glyphs=[], gids=[], unicodes=[], text=""): self.unicodes_requested.update(unicodes) if isinstance(text, bytes): text = text.decode("utf_8") text_utf32 = text.encode("utf-32-be") nchars = len(text_utf32) // 4 for u in struct.unpack(">%dL" % nchars, text_utf32): self.unicodes_requested.add(u) self.glyph_names_requested.update(glyphs) self.glyph_ids_requested.update(gids) def _prune_pre_subset(self, font): for tag in self._sort_tables(font): if ( tag.strip() in self.options.drop_tables or ( tag.strip() in self.options.hinting_tables and not self.options.hinting ) or (tag == "kern" and (not self.options.legacy_kern and "GPOS" in font)) ): log.info("%s dropped", tag) del font[tag] continue clazz = ttLib.getTableClass(tag) if hasattr(clazz, "prune_pre_subset"): with timer("load '%s'" % tag): table = font[tag] with timer("prune '%s'" % tag): retain = table.prune_pre_subset(font, self.options) if not retain: log.info("%s pruned to empty; dropped", tag) del font[tag] continue else: log.info("%s pruned", tag) def _closure_glyphs(self, font): realGlyphs = set(font.getGlyphOrder()) self.orig_glyph_order = glyph_order = font.getGlyphOrder() self.glyphs_requested = set() self.glyphs_requested.update(self.glyph_names_requested) self.glyphs_requested.update( glyph_order[i] for i in self.glyph_ids_requested if i < len(glyph_order) ) self.glyphs_missing = set() self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) self.glyphs_missing.update( i for i in self.glyph_ids_requested if i >= len(glyph_order) ) if self.glyphs_missing: log.info("Missing requested glyphs: %s", self.glyphs_missing) if not self.options.ignore_missing_glyphs: raise self.MissingGlyphsSubsettingError(self.glyphs_missing) self.glyphs = self.glyphs_requested.copy() self.unicodes_missing = set() 
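# (Note: every closure pass below re-intersects self.glyphs with
# realGlyphs, so tables or lookups that reference glyph names not actually
# present in the font cannot grow the subset.)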
if "cmap" in font: with timer("close glyph list over 'cmap'"): font["cmap"].closure_glyphs(self) self.glyphs.intersection_update(realGlyphs) self.glyphs_cmaped = frozenset(self.glyphs) if self.unicodes_missing: missing = ["U+%04X" % u for u in self.unicodes_missing] log.info("Missing glyphs for requested Unicodes: %s", missing) if not self.options.ignore_missing_unicodes: raise self.MissingUnicodesSubsettingError(missing) del missing if self.options.notdef_glyph: if "glyf" in font: self.glyphs.add(font.getGlyphName(0)) log.info("Added gid0 to subset") else: self.glyphs.add(".notdef") log.info("Added .notdef to subset") if self.options.recommended_glyphs: if "glyf" in font: for i in range(min(4, len(font.getGlyphOrder()))): self.glyphs.add(font.getGlyphName(i)) log.info("Added first four glyphs to subset") if self.options.layout_closure and "GSUB" in font: with timer("close glyph list over 'GSUB'"): log.info( "Closing glyph list over 'GSUB': %d glyphs before", len(self.glyphs) ) log.glyphs(self.glyphs, font=font) font["GSUB"].closure_glyphs(self) self.glyphs.intersection_update(realGlyphs) log.info( "Closed glyph list over 'GSUB': %d glyphs after", len(self.glyphs) ) log.glyphs(self.glyphs, font=font) self.glyphs_gsubed = frozenset(self.glyphs) if "MATH" in font: with timer("close glyph list over 'MATH'"): log.info( "Closing glyph list over 'MATH': %d glyphs before", len(self.glyphs) ) log.glyphs(self.glyphs, font=font) font["MATH"].closure_glyphs(self) self.glyphs.intersection_update(realGlyphs) log.info( "Closed glyph list over 'MATH': %d glyphs after", len(self.glyphs) ) log.glyphs(self.glyphs, font=font) self.glyphs_mathed = frozenset(self.glyphs) for table in ("COLR", "bsln"): if table in font: with timer("close glyph list over '%s'" % table): log.info( "Closing glyph list over '%s': %d glyphs before", table, len(self.glyphs), ) log.glyphs(self.glyphs, font=font) font[table].closure_glyphs(self) self.glyphs.intersection_update(realGlyphs) log.info( "Closed glyph list over '%s': %d glyphs after", table, len(self.glyphs), ) log.glyphs(self.glyphs, font=font) setattr(self, f"glyphs_{table.lower()}ed", frozenset(self.glyphs)) if "glyf" in font: with timer("close glyph list over 'glyf'"): log.info( "Closing glyph list over 'glyf': %d glyphs before", len(self.glyphs) ) log.glyphs(self.glyphs, font=font) font["glyf"].closure_glyphs(self) self.glyphs.intersection_update(realGlyphs) log.info( "Closed glyph list over 'glyf': %d glyphs after", len(self.glyphs) ) log.glyphs(self.glyphs, font=font) self.glyphs_glyfed = frozenset(self.glyphs) if "CFF " in font: with timer("close glyph list over 'CFF '"): log.info( "Closing glyph list over 'CFF ': %d glyphs before", len(self.glyphs) ) log.glyphs(self.glyphs, font=font) font["CFF "].closure_glyphs(self) self.glyphs.intersection_update(realGlyphs) log.info( "Closed glyph list over 'CFF ': %d glyphs after", len(self.glyphs) ) log.glyphs(self.glyphs, font=font) self.glyphs_cffed = frozenset(self.glyphs) self.glyphs_retained = frozenset(self.glyphs) order = font.getReverseGlyphMap() self.reverseOrigGlyphMap = {g: order[g] for g in self.glyphs_retained} self.last_retained_order = max(self.reverseOrigGlyphMap.values()) self.last_retained_glyph = font.getGlyphOrder()[self.last_retained_order] self.glyphs_emptied = frozenset() if self.options.retain_gids: self.glyphs_emptied = { g for g in realGlyphs - self.glyphs_retained if order[g] <= self.last_retained_order } self.reverseEmptiedGlyphMap = {g: order[g] for g in self.glyphs_emptied} if not 
self.options.retain_gids: new_glyph_order = [g for g in glyph_order if g in self.glyphs_retained] else: new_glyph_order = [ g for g in glyph_order if font.getGlyphID(g) <= self.last_retained_order ] # We'll call font.setGlyphOrder() at the end of _subset_glyphs when all # tables have been subsetted. Below, we use the new glyph order to get # a map from old to new glyph indices, which can be useful when # subsetting individual tables (e.g. SVG) that refer to GIDs. self.new_glyph_order = new_glyph_order self.glyph_index_map = { order[new_glyph_order[i]]: i for i in range(len(new_glyph_order)) } log.info("Retaining %d glyphs", len(self.glyphs_retained)) del self.glyphs def _subset_glyphs(self, font): self.used_mark_sets = [] for tag in self._sort_tables(font): clazz = ttLib.getTableClass(tag) if tag.strip() in self.options.no_subset_tables: log.info("%s subsetting not needed", tag) elif hasattr(clazz, "subset_glyphs"): with timer("subset '%s'" % tag): table = font[tag] self.glyphs = self.glyphs_retained retain = table.subset_glyphs(self) del self.glyphs if not retain: log.info("%s subsetted to empty; dropped", tag) del font[tag] else: log.info("%s subsetted", tag) elif self.options.passthrough_tables: log.info("%s NOT subset; don't know how to subset", tag) else: log.warning("%s NOT subset; don't know how to subset; dropped", tag) del font[tag] with timer("subset GlyphOrder"): font.setGlyphOrder(self.new_glyph_order) def _prune_post_subset(self, font): tableTags = font.keys() # Prune the name table last because when we're pruning the name table, # we visit each table in the font to see what name table records are # still in use. if "name" in tableTags: tableTags.remove("name") tableTags.append("name") for tag in tableTags: if tag == "GlyphOrder": continue if tag == "OS/2": if self.options.prune_unicode_ranges: old_uniranges = font[tag].getUnicodeRanges() new_uniranges = font[tag].recalcUnicodeRanges(font, pruneOnly=True) if old_uniranges != new_uniranges: log.info( "%s Unicode ranges pruned: %s", tag, sorted(new_uniranges) ) if self.options.prune_codepage_ranges and font[tag].version >= 1: # codepage range fields were added with OS/2 format 1 # https://learn.microsoft.com/en-us/typography/opentype/spec/os2#version-1 old_codepages = font[tag].getCodePageRanges() new_codepages = font[tag].recalcCodePageRanges(font, pruneOnly=True) if old_codepages != new_codepages: log.info( "%s CodePage ranges pruned: %s", tag, sorted(new_codepages), ) if self.options.recalc_average_width: old_avg_width = font[tag].xAvgCharWidth new_avg_width = font[tag].recalcAvgCharWidth(font) if old_avg_width != new_avg_width: log.info("%s xAvgCharWidth updated: %d", tag, new_avg_width) if self.options.recalc_max_context: max_context = maxCtxFont(font) if max_context != font[tag].usMaxContext: font[tag].usMaxContext = max_context log.info("%s usMaxContext updated: %d", tag, max_context) clazz = ttLib.getTableClass(tag) if hasattr(clazz, "prune_post_subset"): with timer("prune '%s'" % tag): table = font[tag] retain = table.prune_post_subset(font, self.options) if not retain: log.info("%s pruned to empty; dropped", tag) del font[tag] else: log.info("%s pruned", tag) def _sort_tables(self, font): tagOrder = ["GDEF", "GPOS", "GSUB", "fvar", "avar", "gvar", "name", "glyf"] tagOrder = {t: i + 1 for i, t in enumerate(tagOrder)} tags = sorted(font.keys(), key=lambda tag: tagOrder.get(tag, 0)) return [t for t in tags if t != "GlyphOrder"] def subset(self, font): self._prune_pre_subset(font) self._closure_glyphs(font) 
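# (The remaining two phases follow: per-table subset_glyphs, then
# prune_post_subset; together with the two calls above this is the whole
# four-phase subsetting pipeline.)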
self._subset_glyphs(font) self._prune_post_subset(font) @timer("load font") def load_font(fontFile, options, checkChecksums=0, dontLoadGlyphNames=False, lazy=True): font = ttLib.TTFont( fontFile, checkChecksums=checkChecksums, recalcBBoxes=options.recalc_bounds, recalcTimestamp=options.recalc_timestamp, lazy=lazy, fontNumber=options.font_number, ) # Hack: # # If we don't need glyph names, change 'post' class to not try to # load them. It avoids a lot of headaches with broken fonts, as well # as loading time. # # Ideally ttLib should provide a way to ask it to skip loading # glyph names. But it currently doesn't provide such a thing. # if dontLoadGlyphNames: post = ttLib.getTableClass("post") saved = post.decode_format_2_0 post.decode_format_2_0 = post.decode_format_3_0 f = font["post"] if f.formatType == 2.0: f.formatType = 3.0 post.decode_format_2_0 = saved return font @timer("compile and save font") def save_font(font, outfile, options): if options.with_zopfli and options.flavor == "woff": from fontTools.ttLib import sfnt sfnt.USE_ZOPFLI = True font.flavor = options.flavor font.cfg[USE_HARFBUZZ_REPACKER] = options.harfbuzz_repacker font.save(outfile, reorderTables=options.canonical_order) def parse_unicodes(s): import re s = re.sub(r"0[xX]", " ", s) s = re.sub(r"[<+>,;&#\\xXuU\n ]", " ", s) l = [] for item in s.split(): fields = item.split("-") if len(fields) == 1: l.append(int(item, 16)) else: start, end = fields l.extend(range(int(start, 16), int(end, 16) + 1)) return l def parse_gids(s): l = [] for item in s.replace(",", " ").split(): fields = item.split("-") if len(fields) == 1: l.append(int(fields[0])) else: l.extend(range(int(fields[0]), int(fields[1]) + 1)) return l def parse_glyphs(s): return s.replace(",", " ").split() def usage(): print("usage:", __usage__, file=sys.stderr) print("Try pyftsubset --help for more information.\n", file=sys.stderr) @timer("make one with everything (TOTAL TIME)") def main(args=None): """OpenType font subsetter and optimizer""" from os.path import splitext from fontTools import configLogger if args is None: args = sys.argv[1:] if "--help" in args: print(__doc__) return 0 options = Options() try: args = options.parse_opts( args, ignore_unknown=[ "gids", "gids-file", "glyphs", "glyphs-file", "text", "text-file", "unicodes", "unicodes-file", "output-file", ], ) except options.OptionError as e: usage() print("ERROR:", e, file=sys.stderr) return 2 if len(args) < 2: usage() return 1 configLogger(level=logging.INFO if options.verbose else logging.WARNING) if options.timing: timer.logger.setLevel(logging.DEBUG) else: timer.logger.disabled = True fontfile = args[0] args = args[1:] subsetter = Subsetter(options=options) outfile = None glyphs = [] gids = [] unicodes = [] wildcard_glyphs = False wildcard_unicodes = False text = "" for g in args: if g == "*": wildcard_glyphs = True continue if g.startswith("--output-file="): outfile = g[14:] continue if g.startswith("--text="): text += g[7:] continue if g.startswith("--text-file="): with open(g[12:], encoding="utf-8") as f: text += f.read().replace("\n", "") continue if g.startswith("--unicodes="): if g[11:] == "*": wildcard_unicodes = True else: unicodes.extend(parse_unicodes(g[11:])) continue if g.startswith("--unicodes-file="): with open(g[16:]) as f: for line in f.readlines(): unicodes.extend(parse_unicodes(line.split("#")[0])) continue if g.startswith("--gids="): gids.extend(parse_gids(g[7:])) continue if g.startswith("--gids-file="): with open(g[12:]) as f: for line in f.readlines():
gids.extend(parse_gids(line.split("#")[0])) continue if g.startswith("--glyphs="): if g[9:] == "*": wildcard_glyphs = True else: glyphs.extend(parse_glyphs(g[9:])) continue if g.startswith("--glyphs-file="): with open(g[14:]) as f: for line in f.readlines(): glyphs.extend(parse_glyphs(line.split("#")[0])) continue glyphs.append(g) dontLoadGlyphNames = not options.glyph_names and not glyphs lazy = options.lazy font = load_font( fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames, lazy=lazy ) if outfile is None: ext = "." + options.flavor.lower() if options.flavor is not None else None outfile = makeOutputFileName( fontfile, extension=ext, overWrite=True, suffix=".subset" ) with timer("compile glyph list"): if wildcard_glyphs: glyphs.extend(font.getGlyphOrder()) if wildcard_unicodes: for t in font["cmap"].tables: if t.isUnicode(): unicodes.extend(t.cmap.keys()) assert "" not in glyphs log.info("Text: '%s'" % text) log.info("Unicodes: %s", unicodes) log.info("Glyphs: %s", glyphs) log.info("Gids: %s", gids) subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) subsetter.subset(font) save_font(font, outfile, options) if options.verbose: import os log.info("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) log.info("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) if options.xml: font.saveXML(sys.stdout) font.close() __all__ = [ "Options", "Subsetter", "load_font", "save_font", "parse_gids", "parse_glyphs", "parse_unicodes", "main", ] PKaZZZ�:��__fontTools/subset/__main__.pyimport sys from fontTools.subset import main if __name__ == "__main__": sys.exit(main()) PKaZZZd�x�I�IfontTools/subset/cff.pyfrom fontTools.misc import psCharStrings from fontTools import ttLib from fontTools.pens.basePen import NullPen from fontTools.misc.roundTools import otRound from fontTools.misc.loggingTools import deprecateFunction from fontTools.subset.util import _add_method, _uniq_sort class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler): def __init__(self, components, localSubrs, globalSubrs): psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) self.components = components def op_endchar(self, index): args = self.popall() if len(args) >= 4: from fontTools.encodings.StandardEncoding import StandardEncoding # endchar can do seac accent building; the T2 spec says it's deprecated, # but recent software that shall remain nameless does output it.
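# (seac layout, for reference: the four args consumed below are adx, ady,
# bchar, achar; bchar and achar are StandardEncoding codes naming the base
# and accent glyphs, both of which must stay in the subset for the
# composite to keep rendering.)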
adx, ady, bchar, achar = args[-4:] baseGlyph = StandardEncoding[bchar] accentGlyph = StandardEncoding[achar] self.components.add(baseGlyph) self.components.add(accentGlyph) @_add_method(ttLib.getTableClass("CFF ")) def closure_glyphs(self, s): cff = self.cff assert len(cff) == 1 font = cff[cff.keys()[0]] glyphSet = font.CharStrings decompose = s.glyphs while decompose: components = set() for g in decompose: if g not in glyphSet: continue gl = glyphSet[g] subrs = getattr(gl.private, "Subrs", []) decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs) decompiler.execute(gl) components -= s.glyphs s.glyphs.update(components) decompose = components def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False): c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName) if isCFF2 or ignoreWidth: # CFF2 charstrings have no widths nor 'endchar' operators c.setProgram([] if isCFF2 else ["endchar"]) else: if hasattr(font, "FDArray") and font.FDArray is not None: private = font.FDArray[fdSelectIndex].Private else: private = font.Private dfltWdX = private.defaultWidthX nmnlWdX = private.nominalWidthX pen = NullPen() c.draw(pen) # this will set the charstring's width if c.width != dfltWdX: c.program = [c.width - nmnlWdX, "endchar"] else: c.program = ["endchar"] @_add_method(ttLib.getTableClass("CFF ")) def prune_pre_subset(self, font, options): cff = self.cff # CFF table must have one font only cff.fontNames = cff.fontNames[:1] if options.notdef_glyph and not options.notdef_outline: isCFF2 = cff.major > 1 for fontname in cff.keys(): font = cff[fontname] _empty_charstring(font, ".notdef", isCFF2=isCFF2) # Clear useless Encoding for fontname in cff.keys(): font = cff[fontname] # https://github.com/fonttools/fonttools/issues/620 font.Encoding = "StandardEncoding" return True # bool(cff.fontNames) @_add_method(ttLib.getTableClass("CFF ")) def subset_glyphs(self, s): cff = self.cff for fontname in cff.keys(): font = cff[fontname] cs = font.CharStrings glyphs = s.glyphs.union(s.glyphs_emptied) # Load all glyphs for g in font.charset: if g not in glyphs: continue c, _ = cs.getItemAndSelector(g) if cs.charStringsAreIndexed: indices = [i for i, g in enumerate(font.charset) if g in glyphs] csi = cs.charStringsIndex csi.items = [csi.items[i] for i in indices] del csi.file, csi.offsets if hasattr(font, "FDSelect"): sel = font.FDSelect sel.format = None sel.gidArray = [sel.gidArray[i] for i in indices] newCharStrings = {} for indicesIdx, charsetIdx in enumerate(indices): g = font.charset[charsetIdx] if g in cs.charStrings: newCharStrings[g] = indicesIdx cs.charStrings = newCharStrings else: cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs} font.charset = [g for g in font.charset if g in glyphs] font.numGlyphs = len(font.charset) if s.options.retain_gids: isCFF2 = cff.major > 1 for g in s.glyphs_emptied: _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True) return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) @_add_method(psCharStrings.T2CharString) def subset_subroutines(self, subrs, gsubrs): p = self.program for i in range(1, len(p)): if p[i] == "callsubr": assert isinstance(p[i - 1], int) p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias elif p[i] == "callgsubr": assert isinstance(p[i - 1], int) p[i - 1] = ( gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias ) @_add_method(psCharStrings.T2CharString) def drop_hints(self): hints = self._hints if hints.deletions: p = self.program for idx in 
reversed(hints.deletions): del p[idx - 2 : idx] if hints.has_hint: assert not hints.deletions or hints.last_hint <= hints.deletions[0] self.program = self.program[hints.last_hint :] if not self.program: # TODO CFF2 no need for endchar. self.program.append("endchar") if hasattr(self, "width"): # Insert width back if needed if self.width != self.private.defaultWidthX: # For CFF2 charstrings, this should never happen assert ( self.private.defaultWidthX is not None ), "CFF2 CharStrings must not have an initial width value" self.program.insert(0, self.width - self.private.nominalWidthX) if hints.has_hintmask: i = 0 p = self.program while i < len(p): if p[i] in ["hintmask", "cntrmask"]: assert i + 1 <= len(p) del p[i : i + 2] continue i += 1 assert len(self.program) del self._hints class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): def __init__(self, localSubrs, globalSubrs, private): psCharStrings.SimpleT2Decompiler.__init__( self, localSubrs, globalSubrs, private ) for subrs in [localSubrs, globalSubrs]: if subrs and not hasattr(subrs, "_used"): subrs._used = set() def op_callsubr(self, index): self.localSubrs._used.add(self.operandStack[-1] + self.localBias) psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) def op_callgsubr(self, index): self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias) psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor): class Hints(object): def __init__(self): # Whether calling this charstring produces any hint stems # Note that if a charstring starts with hintmask, it will # have has_hint set to True, because it *might* produce an # implicit vstem if called under certain conditions. self.has_hint = False # Index to start at to drop all hints self.last_hint = 0 # Index up to which we know more hints are possible. # Only relevant if status is 0 or 1. self.last_checked = 0 # The status means: # 0: after dropping hints, this charstring is empty # 1: after dropping hints, there may be more hints # continuing after this, or there might be # other things. Not clear yet. # 2: no more hints possible after this charstring self.status = 0 # Has hintmask instructions; not recursive self.has_hintmask = False # List of indices of calls to empty subroutines to remove. self.deletions = [] pass def __init__( self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None ): self._css = css psCharStrings.T2WidthExtractor.__init__( self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX ) self.private = private def execute(self, charString): old_hints = charString._hints if hasattr(charString, "_hints") else None charString._hints = self.Hints() psCharStrings.T2WidthExtractor.execute(self, charString) hints = charString._hints if hints.has_hint or hints.has_hintmask: self._css.add(charString) if hints.status != 2: # Check from last_check, make sure we didn't have any operators. 
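# Note (added for clarity): after executing a charstring, the tail of its
# program is scanned for operator names (strings). If an operator follows the
# last hint, no further hints are possible and status becomes 2; if only bare
# numbers remain, they could still feed an implicit vstem, so status stays 1.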
for i in range(hints.last_checked, len(charString.program) - 1): if isinstance(charString.program[i], str): hints.status = 2 break else: hints.status = 1 # There's *something* here hints.last_checked = len(charString.program) if old_hints: assert hints.__dict__ == old_hints.__dict__ def op_callsubr(self, index): subr = self.localSubrs[self.operandStack[-1] + self.localBias] psCharStrings.T2WidthExtractor.op_callsubr(self, index) self.processSubr(index, subr) def op_callgsubr(self, index): subr = self.globalSubrs[self.operandStack[-1] + self.globalBias] psCharStrings.T2WidthExtractor.op_callgsubr(self, index) self.processSubr(index, subr) def op_hstem(self, index): psCharStrings.T2WidthExtractor.op_hstem(self, index) self.processHint(index) def op_vstem(self, index): psCharStrings.T2WidthExtractor.op_vstem(self, index) self.processHint(index) def op_hstemhm(self, index): psCharStrings.T2WidthExtractor.op_hstemhm(self, index) self.processHint(index) def op_vstemhm(self, index): psCharStrings.T2WidthExtractor.op_vstemhm(self, index) self.processHint(index) def op_hintmask(self, index): rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index) self.processHintmask(index) return rv def op_cntrmask(self, index): rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index) self.processHintmask(index) return rv def processHintmask(self, index): cs = self.callingStack[-1] hints = cs._hints hints.has_hintmask = True if hints.status != 2: # Check from last_check, see if we may be an implicit vstem for i in range(hints.last_checked, index - 1): if isinstance(cs.program[i], str): hints.status = 2 break else: # We are an implicit vstem hints.has_hint = True hints.last_hint = index + 1 hints.status = 0 hints.last_checked = index + 1 def processHint(self, index): cs = self.callingStack[-1] hints = cs._hints hints.has_hint = True hints.last_hint = index hints.last_checked = index def processSubr(self, index, subr): cs = self.callingStack[-1] hints = cs._hints subr_hints = subr._hints # Check from last_check, make sure we didn't have # any operators. 
if hints.status != 2: for i in range(hints.last_checked, index - 1): if isinstance(cs.program[i], str): hints.status = 2 break hints.last_checked = index if hints.status != 2: if subr_hints.has_hint: hints.has_hint = True # Decide where to chop off from if subr_hints.status == 0: hints.last_hint = index else: hints.last_hint = index - 2 # Leave the subr call in elif subr_hints.status == 0: hints.deletions.append(index) hints.status = max(hints.status, subr_hints.status) @_add_method(ttLib.getTableClass("CFF ")) def prune_post_subset(self, ttfFont, options): cff = self.cff for fontname in cff.keys(): font = cff[fontname] cs = font.CharStrings # Drop unused FontDictionaries if hasattr(font, "FDSelect"): sel = font.FDSelect indices = _uniq_sort(sel.gidArray) sel.gidArray = [indices.index(ss) for ss in sel.gidArray] arr = font.FDArray arr.items = [arr[i] for i in indices] del arr.file, arr.offsets # Desubroutinize if asked for if options.desubroutinize: cff.desubroutinize() # Drop hints if not needed if not options.hinting: self.remove_hints() elif not options.desubroutinize: self.remove_unused_subroutines() return True def _delete_empty_subrs(private_dict): if hasattr(private_dict, "Subrs") and not private_dict.Subrs: if "Subrs" in private_dict.rawDict: del private_dict.rawDict["Subrs"] del private_dict.Subrs @deprecateFunction( "use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning ) @_add_method(ttLib.getTableClass("CFF ")) def desubroutinize(self): self.cff.desubroutinize() @_add_method(ttLib.getTableClass("CFF ")) def remove_hints(self): cff = self.cff for fontname in cff.keys(): font = cff[fontname] cs = font.CharStrings # This can be tricky, but doesn't have to. What we do is: # # - Run all used glyph charstrings and recurse into subroutines, # - For each charstring (including subroutines), if it has any # of the hint stem operators, we mark it as such. # Upon returning, for each charstring we note all the # subroutine calls it makes that (recursively) contain a stem, # - Dropping hinting then consists of the following two ops: # * Drop the piece of the program in each charstring before the # last call to a stem op or a stem-calling subroutine, # * Drop all hintmask operations. # - It's trickier... A hintmask right after hints and a few numbers # will act as an implicit vstemhm. As such, we track whether # we have seen any non-hint operators so far and do the right # thing, recursively... 
Good luck understanding that :( css = set() for g in font.charset: c, _ = cs.getItemAndSelector(g) c.decompile() subrs = getattr(c.private, "Subrs", []) decompiler = _DehintingT2Decompiler( css, subrs, c.globalSubrs, c.private.nominalWidthX, c.private.defaultWidthX, c.private, ) decompiler.execute(c) c.width = decompiler.width for charstring in css: charstring.drop_hints() del css # Drop font-wide hinting values all_privs = [] if hasattr(font, "FDArray"): all_privs.extend(fd.Private for fd in font.FDArray) else: all_privs.append(font.Private) for priv in all_privs: for k in [ "BlueValues", "OtherBlues", "FamilyBlues", "FamilyOtherBlues", "BlueScale", "BlueShift", "BlueFuzz", "StemSnapH", "StemSnapV", "StdHW", "StdVW", "ForceBold", "LanguageGroup", "ExpansionFactor", ]: if hasattr(priv, k): setattr(priv, k, None) self.remove_unused_subroutines() @_add_method(ttLib.getTableClass("CFF ")) def remove_unused_subroutines(self): cff = self.cff for fontname in cff.keys(): font = cff[fontname] cs = font.CharStrings # Renumber subroutines to remove unused ones # Mark all used subroutines for g in font.charset: c, _ = cs.getItemAndSelector(g) subrs = getattr(c.private, "Subrs", []) decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private) decompiler.execute(c) all_subrs = [font.GlobalSubrs] if hasattr(font, "FDArray"): all_subrs.extend( fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, "Subrs") and fd.Private.Subrs ) elif hasattr(font.Private, "Subrs") and font.Private.Subrs: all_subrs.append(font.Private.Subrs) subrs = set(subrs) # Remove duplicates # Prepare for subrs in all_subrs: if not hasattr(subrs, "_used"): subrs._used = set() subrs._used = _uniq_sort(subrs._used) subrs._old_bias = psCharStrings.calcSubrBias(subrs) subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) # Renumber glyph charstrings for g in font.charset: c, _ = cs.getItemAndSelector(g) subrs = getattr(c.private, "Subrs", None) c.subset_subroutines(subrs, font.GlobalSubrs) # Renumber subroutines themselves for subrs in all_subrs: if subrs == font.GlobalSubrs: if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"): local_subrs = font.Private.Subrs else: local_subrs = None else: local_subrs = subrs subrs.items = [subrs.items[i] for i in subrs._used] if hasattr(subrs, "file"): del subrs.file if hasattr(subrs, "offsets"): del subrs.offsets for subr in subrs.items: subr.subset_subroutines(local_subrs, font.GlobalSubrs) # Delete local SubrsIndex if empty if hasattr(font, "FDArray"): for fd in font.FDArray: _delete_empty_subrs(fd.Private) else: _delete_empty_subrs(font.Private) # Cleanup for subrs in all_subrs: del subrs._used, subrs._old_bias, subrs._new_bias

# ==== fontTools/subset/svg.py ====
from __future__ import annotations import re from functools import lru_cache from itertools import chain, count from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple try: from lxml import etree except ImportError: # lxml is required for subsetting SVG, but we prefer to delay the import error # until subset_glyphs() is called (i.e.
if font to subset has an 'SVG ' table) etree = None from fontTools import ttLib from fontTools.subset.util import _add_method from fontTools.ttLib.tables.S_V_G_ import SVGDocument __all__ = ["subset_glyphs"] GID_RE = re.compile(r"^glyph(\d+)$") NAMESPACES = { "svg": "http://www.w3.org/2000/svg", "xlink": "http://www.w3.org/1999/xlink", } XLINK_HREF = f'{{{NAMESPACES["xlink"]}}}href' # TODO(anthrotype): Replace with functools.cache once we are 3.9+ @lru_cache(maxsize=None) def xpath(path): # compile XPath upfront, caching result to reuse on multiple elements return etree.XPath(path, namespaces=NAMESPACES) def group_elements_by_id(tree: etree.Element) -> Dict[str, etree.Element]: # select all svg elements with 'id' attribute no matter where they are # including the root element itself: # https://github.com/fonttools/fonttools/issues/2548 return {el.attrib["id"]: el for el in xpath("//svg:*[@id]")(tree)} def parse_css_declarations(style_attr: str) -> Dict[str, str]: # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/style # https://developer.mozilla.org/en-US/docs/Web/CSS/Syntax#css_declarations result = {} for declaration in style_attr.split(";"): if declaration.count(":") == 1: property_name, value = declaration.split(":") property_name = property_name.strip() result[property_name] = value.strip() elif declaration.strip(): raise ValueError(f"Invalid CSS declaration syntax: {declaration}") return result def iter_referenced_ids(tree: etree.Element) -> Iterator[str]: # Yield all the ids that can be reached via references from this element tree. # We currently support xlink:href (as used by <use> and gradient templates), # and local url(#...) links found in fill or clip-path attributes # TODO(anthrotype): Check we aren't missing other supported kinds of reference find_svg_elements_with_references = xpath( ".//svg:*[ " "starts-with(@xlink:href, '#') " "or starts-with(@fill, 'url(#') " "or starts-with(@clip-path, 'url(#') " "or contains(@style, ':url(#') " "]", ) for el in chain([tree], find_svg_elements_with_references(tree)): ref_id = href_local_target(el) if ref_id is not None: yield ref_id attrs = el.attrib if "style" in attrs: attrs = {**dict(attrs), **parse_css_declarations(el.attrib["style"])} for attr in ("fill", "clip-path"): if attr in attrs: value = attrs[attr] if value.startswith("url(#") and value.endswith(")"): ref_id = value[5:-1] assert ref_id yield ref_id def closure_element_ids( elements: Dict[str, etree.Element], element_ids: Set[str] ) -> None: # Expand the initial subset of element ids to include ids that can be reached # via references from the initial set. unvisited = element_ids while unvisited: referenced: Set[str] = set() for el_id in unvisited: if el_id not in elements: # ignore dangling reference; not our job to validate svg continue referenced.update(iter_referenced_ids(elements[el_id])) referenced -= element_ids element_ids.update(referenced) unvisited = referenced def subset_elements(el: etree.Element, retained_ids: Set[str]) -> bool: # Keep elements if their id is in the subset, or any of their children's id is. # Drop elements whose id is not in the subset, and either have no children, # or all their children are being dropped.
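# Illustrative example (added; the ids and markup are assumed, not from the
# original source): with retained_ids = {"glyph1"} and a tree like
#
#   <svg><defs><path id="glyph1"/></defs><path id="glyph2"/></svg>
#
# the "glyph2" path is dropped (id not retained, no children), while <defs>
# and the root are kept because a retained element lives beneath them.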
if el.attrib.get("id") in retained_ids: # if id is in the set, don't recurse; keep whole subtree return True # recursively subset all the children; we use a list comprehension instead # of a parentheses-less generator expression because we don't want any() to # short-circuit, as our function has a side effect of dropping empty elements. if any([subset_elements(e, retained_ids) for e in el]): return True assert len(el) == 0 parent = el.getparent() if parent is not None: parent.remove(el) return False def remap_glyph_ids( svg: etree.Element, glyph_index_map: Dict[int, int] ) -> Dict[str, str]: # Given {old_gid: new_gid} map, rename all elements containing id="glyph{gid}" # special attributes elements = group_elements_by_id(svg) id_map = {} for el_id, el in elements.items(): m = GID_RE.match(el_id) if not m: continue old_index = int(m.group(1)) new_index = glyph_index_map.get(old_index) if new_index is not None: if old_index == new_index: continue new_id = f"glyph{new_index}" else: # If the old index is missing, the element corresponds to a glyph that was # excluded from the font's subset. # We rename it to avoid clashes with the new GIDs or other element ids. new_id = f".{el_id}" n = count(1) while new_id in elements: new_id = f"{new_id}.{next(n)}" id_map[el_id] = new_id el.attrib["id"] = new_id return id_map def href_local_target(el: etree.Element) -> Optional[str]: if XLINK_HREF in el.attrib: href = el.attrib[XLINK_HREF] if href.startswith("#") and len(href) > 1: return href[1:] # drop the leading # return None def update_glyph_href_links(svg: etree.Element, id_map: Dict[str, str]) -> None: # update all xlink:href="#glyph..." attributes to point to the new glyph ids for el in xpath(".//svg:*[starts-with(@xlink:href, '#glyph')]")(svg): old_id = href_local_target(el) assert old_id is not None if old_id in id_map: new_id = id_map[old_id] el.attrib[XLINK_HREF] = f"#{new_id}" def ranges(ints: Iterable[int]) -> Iterator[Tuple[int, int]]: # Yield sorted, non-overlapping (min, max) ranges of consecutive integers sorted_ints = iter(sorted(set(ints))) try: start = end = next(sorted_ints) except StopIteration: return for v in sorted_ints: if v - 1 == end: end = v else: yield (start, end) start = end = v yield (start, end) @_add_method(ttLib.getTableClass("SVG ")) def subset_glyphs(self, s) -> bool: if etree is None: raise ImportError("No module named 'lxml', required to subset SVG") # glyph names (before subsetting) glyph_order: List[str] = s.orig_glyph_order # map from glyph names to original glyph indices rev_orig_glyph_map: Dict[str, int] = s.reverseOrigGlyphMap # map from original to new glyph indices (after subsetting) glyph_index_map: Dict[int, int] = s.glyph_index_map new_docs: List[SVGDocument] = [] for doc in self.docList: glyphs = { glyph_order[i] for i in range(doc.startGlyphID, doc.endGlyphID + 1) }.intersection(s.glyphs) if not glyphs: # no intersection: we can drop the whole record continue svg = etree.fromstring( # encode because fromstring dislikes xml encoding decl if input is str. # SVG xml encoding must be utf-8 as per OT spec. doc.data.encode("utf-8"), parser=etree.XMLParser( # Disable libxml2 security restrictions to support very deep trees. # Without this we would get an error like this: # `lxml.etree.XMLSyntaxError: internal error: Huge input lookup` # when parsing big fonts e.g. noto-emoji-picosvg.ttf.
huge_tree=True, # ignore blank text as it's not meaningful in OT-SVG; it also prevents # dangling tail text after removing an element when pretty_print=True remove_blank_text=True, # don't replace entities; we don't expect any in OT-SVG and they may # be abused for XXE attacks resolve_entities=False, ), ) elements = group_elements_by_id(svg) gids = {rev_orig_glyph_map[g] for g in glyphs} element_ids = {f"glyph{i}" for i in gids} closure_element_ids(elements, element_ids) if not subset_elements(svg, element_ids): continue if not s.options.retain_gids: id_map = remap_glyph_ids(svg, glyph_index_map) update_glyph_href_links(svg, id_map) new_doc = etree.tostring(svg, pretty_print=s.options.pretty_svg).decode("utf-8") new_gids = (glyph_index_map[i] for i in gids) for start, end in ranges(new_gids): new_docs.append(SVGDocument(new_doc, start, end, doc.compressed)) self.docList = new_docs return bool(self.docList)

# ==== fontTools/subset/util.py ====
"""Private utility methods used by the subset modules""" def _add_method(*clazzes): """Returns a decorator function that adds a new method to one or more classes.""" def wrapper(method): done = [] for clazz in clazzes: if clazz in done: continue # Support multiple names of a clazz done.append(clazz) assert clazz.__name__ != "DefaultTable", "Oops, table class not found." assert not hasattr( clazz, method.__name__ ), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__) setattr(clazz, method.__name__, method) return None return wrapper def _uniq_sort(l): return sorted(set(l))

# ==== fontTools/svgLib/__init__.py ====
from .path import SVGPath, parse_path __all__ = ["SVGPath", "parse_path"]

# ==== fontTools/svgLib/path/__init__.py ====
from fontTools.pens.transformPen import TransformPen from fontTools.misc import etree from fontTools.misc.textTools import tostr from .parser import parse_path from .shapes import PathBuilder __all__ = [tostr(s) for s in ("SVGPath", "parse_path")] class SVGPath(object): """Parse SVG ``path`` elements from a file or string, and draw them onto a glyph object that supports the FontTools Pen protocol. For example, reading from an SVG file and drawing to a Defcon Glyph: import defcon glyph = defcon.Glyph() pen = glyph.getPen() svg = SVGPath("path/to/a.svg") svg.draw(pen) Or reading from a string containing SVG data, using the alternative 'fromstring' (a class method): data = '<?xml version="1.0" ...' svg = SVGPath.fromstring(data) svg.draw(pen) Both constructors can optionally take a 'transform' matrix (6-float tuple, or a FontTools Transform object) to modify the draw output. """ def __init__(self, filename=None, transform=None): if filename is None: self.root = etree.ElementTree() else: tree = etree.parse(filename) self.root = tree.getroot() self.transform = transform @classmethod def fromstring(cls, data, transform=None): self = cls(transform=transform) self.root = etree.fromstring(data) return self def draw(self, pen): if self.transform: pen = TransformPen(pen, self.transform) pb = PathBuilder() # xpath | doesn't seem to work reliably so just walk it for el in self.root.iter(): pb.add_path_from_element(el) original_pen = pen for path, transform in zip(pb.paths, pb.transforms): if transform: pen = TransformPen(original_pen, transform) else: pen = original_pen parse_path(path, pen)

# ==== fontTools/svgLib/path/arc.py ====
"""Convert SVG Path's elliptical arcs to Bezier curves.
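For example (an illustrative sketch; 'pen' stands for any object with a
curveTo() method, such as a FontTools pen):

    arc = EllipticalArc(0j, 25, 25, 0, False, True, 50 + 0j)
    arc.draw(pen)  # approximates the arc with cubic pen.curveTo() calls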
The code is mostly adapted from Blink's SVGPathNormalizer::DecomposeArcToCubic https://github.com/chromium/chromium/blob/93831f2/third_party/ blink/renderer/core/svg/svg_path_parser.cc#L169-L278 """ from fontTools.misc.transform import Identity, Scale from math import atan2, ceil, cos, fabs, isfinite, pi, radians, sin, sqrt, tan TWO_PI = 2 * pi PI_OVER_TWO = 0.5 * pi def _map_point(matrix, pt): # apply Transform matrix to a point represented as a complex number r = matrix.transformPoint((pt.real, pt.imag)) return r[0] + r[1] * 1j class EllipticalArc(object): def __init__(self, current_point, rx, ry, rotation, large, sweep, target_point): self.current_point = current_point self.rx = rx self.ry = ry self.rotation = rotation self.large = large self.sweep = sweep self.target_point = target_point # SVG arc's rotation angle is expressed in degrees, whereas Transform.rotate # uses radians self.angle = radians(rotation) # these derived attributes are computed by the _parametrize method self.center_point = self.theta1 = self.theta2 = self.theta_arc = None def _parametrize(self): # convert from endpoint to center parametrization: # https://www.w3.org/TR/SVG/implnote.html#ArcConversionEndpointToCenter # If rx = 0 or ry = 0 then this arc is treated as a straight line segment (a # "lineto") joining the endpoints. # http://www.w3.org/TR/SVG/implnote.html#ArcOutOfRangeParameters rx = fabs(self.rx) ry = fabs(self.ry) if not (rx and ry): return False # If the current point and target point for the arc are identical, it should # be treated as a zero length path. This ensures continuity in animations. if self.target_point == self.current_point: return False mid_point_distance = (self.current_point - self.target_point) * 0.5 point_transform = Identity.rotate(-self.angle) transformed_mid_point = _map_point(point_transform, mid_point_distance) square_rx = rx * rx square_ry = ry * ry square_x = transformed_mid_point.real * transformed_mid_point.real square_y = transformed_mid_point.imag * transformed_mid_point.imag # Check if the radii are big enough to draw the arc, scale radii if not. # http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii radii_scale = square_x / square_rx + square_y / square_ry if radii_scale > 1: rx *= sqrt(radii_scale) ry *= sqrt(radii_scale) self.rx, self.ry = rx, ry point_transform = Scale(1 / rx, 1 / ry).rotate(-self.angle) point1 = _map_point(point_transform, self.current_point) point2 = _map_point(point_transform, self.target_point) delta = point2 - point1 d = delta.real * delta.real + delta.imag * delta.imag scale_factor_squared = max(1 / d - 0.25, 0.0) scale_factor = sqrt(scale_factor_squared) if self.sweep == self.large: scale_factor = -scale_factor delta *= scale_factor center_point = (point1 + point2) * 0.5 center_point += complex(-delta.imag, delta.real) point1 -= center_point point2 -= center_point theta1 = atan2(point1.imag, point1.real) theta2 = atan2(point2.imag, point2.real) theta_arc = theta2 - theta1 if theta_arc < 0 and self.sweep: theta_arc += TWO_PI elif theta_arc > 0 and not self.sweep: theta_arc -= TWO_PI self.theta1 = theta1 self.theta2 = theta1 + theta_arc self.theta_arc = theta_arc self.center_point = center_point return True def _decompose_to_cubic_curves(self): if self.center_point is None and not self._parametrize(): return point_transform = Identity.rotate(self.angle).scale(self.rx, self.ry) # Some results of atan2 on some platform implementations are not exact # enough. So that we get more cubic curves than expected here.
Adding 0.001 # reduces the count of segments to the correct count. num_segments = int(ceil(fabs(self.theta_arc / (PI_OVER_TWO + 0.001)))) for i in range(num_segments): start_theta = self.theta1 + i * self.theta_arc / num_segments end_theta = self.theta1 + (i + 1) * self.theta_arc / num_segments t = (4 / 3) * tan(0.25 * (end_theta - start_theta)) if not isfinite(t): return sin_start_theta = sin(start_theta) cos_start_theta = cos(start_theta) sin_end_theta = sin(end_theta) cos_end_theta = cos(end_theta) point1 = complex( cos_start_theta - t * sin_start_theta, sin_start_theta + t * cos_start_theta, ) point1 += self.center_point target_point = complex(cos_end_theta, sin_end_theta) target_point += self.center_point point2 = target_point point2 += complex(t * sin_end_theta, -t * cos_end_theta) point1 = _map_point(point_transform, point1) point2 = _map_point(point_transform, point2) target_point = _map_point(point_transform, target_point) yield point1, point2, target_point def draw(self, pen): for point1, point2, target_point in self._decompose_to_cubic_curves(): pen.curveTo( (point1.real, point1.imag), (point2.real, point2.imag), (target_point.real, target_point.imag), )

# ==== fontTools/svgLib/path/parser.py ====
# SVG Path specification parser. # This is an adaptation from 'svg.path' by Lennart Regebro (@regebro), # modified so that the parser takes a FontTools Pen object instead of # returning a list of svg.path Path objects. # The original code can be found at: # https://github.com/regebro/svg.path/blob/4f9b6e3/src/svg/path/parser.py # Copyright (c) 2013-2014 Lennart Regebro # License: MIT from .arc import EllipticalArc import re COMMANDS = set("MmZzLlHhVvCcSsQqTtAa") ARC_COMMANDS = set("Aa") UPPERCASE = set("MZLHVCSQTA") COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])") # https://www.w3.org/TR/css-syntax-3/#number-token-diagram # but -6.e-5 will be tokenized as "-6" then "-5" and confuse parsing FLOAT_RE = re.compile( r"[-+]?" # optional sign r"(?:" r"(?:0|[1-9][0-9]*)(?:\.[0-9]+)?(?:[eE][-+]?[0-9]+)?" # int/float r"|" r"(?:\.[0-9]+(?:[eE][-+]?[0-9]+)?)" # float with leading dot (e.g. '.42') r")" ) BOOL_RE = re.compile("^[01]") SEPARATOR_RE = re.compile(r"[, \t]") def _tokenize_path(pathdef): arc_cmd = None for x in COMMAND_RE.split(pathdef): if x in COMMANDS: arc_cmd = x if x in ARC_COMMANDS else None yield x continue if arc_cmd: try: yield from _tokenize_arc_arguments(x) except ValueError as e: raise ValueError(f"Invalid arc command: '{arc_cmd}{x}'") from e else: for token in FLOAT_RE.findall(x): yield token ARC_ARGUMENT_TYPES = ( ("rx", FLOAT_RE), ("ry", FLOAT_RE), ("x-axis-rotation", FLOAT_RE), ("large-arc-flag", BOOL_RE), ("sweep-flag", BOOL_RE), ("x", FLOAT_RE), ("y", FLOAT_RE), ) def _tokenize_arc_arguments(arcdef): raw_args = [s for s in SEPARATOR_RE.split(arcdef) if s] if not raw_args: raise ValueError(f"Not enough arguments: '{arcdef}'") raw_args.reverse() i = 0 while raw_args: arg = raw_args.pop() name, pattern = ARC_ARGUMENT_TYPES[i] match = pattern.search(arg) if not match: raise ValueError(f"Invalid argument for '{name}' parameter: {arg!r}") j, k = match.span() yield arg[j:k] arg = arg[k:] if arg: raw_args.append(arg) # wrap around every 7 consecutive arguments if i == 6: i = 0 else: i += 1 if i != 0: raise ValueError(f"Not enough arguments: '{arcdef}'") def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc): """Parse SVG path definition (i.e.
"d" attribute of <path> elements) and call a 'pen' object's moveTo, lineTo, curveTo, qCurveTo and closePath methods. If 'current_pos' (2-float tuple) is provided, the initial moveTo will be relative to that instead being absolute. If the pen has an "arcTo" method, it is called with the original values of the elliptical arc curve commands: pen.arcTo(rx, ry, rotation, arc_large, arc_sweep, (x, y)) Otherwise, the arcs are approximated by series of cubic Bezier segments ("curveTo"), one every 90 degrees. """ # In the SVG specs, initial movetos are absolute, even if # specified as 'm'. This is the default behavior here as well. # But if you pass in a current_pos variable, the initial moveto # will be relative to that current_pos. This is useful. current_pos = complex(*current_pos) elements = list(_tokenize_path(pathdef)) # Reverse for easy use of .pop() elements.reverse() start_pos = None command = None last_control = None have_arcTo = hasattr(pen, "arcTo") while elements: if elements[-1] in COMMANDS: # New command. last_command = command # Used by S and T command = elements.pop() absolute = command in UPPERCASE command = command.upper() else: # If this element starts with numbers, it is an implicit command # and we don't change the command. Check that it's allowed: if command is None: raise ValueError( "Unallowed implicit command in %s, position %s" % (pathdef, len(pathdef.split()) - len(elements)) ) last_command = command # Used by S and T if command == "M": # Moveto command. x = elements.pop() y = elements.pop() pos = float(x) + float(y) * 1j if absolute: current_pos = pos else: current_pos += pos # M is not preceded by Z; it's an open subpath if start_pos is not None: pen.endPath() pen.moveTo((current_pos.real, current_pos.imag)) # when M is called, reset start_pos # This behavior of Z is defined in svg spec: # http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand start_pos = current_pos # Implicit moveto commands are treated as lineto commands. # So we set command to lineto here, in case there are # further implicit commands after this moveto. command = "L" elif command == "Z": # Close path if current_pos != start_pos: pen.lineTo((start_pos.real, start_pos.imag)) pen.closePath() current_pos = start_pos start_pos = None command = None # You can't have implicit commands after closing. elif command == "L": x = elements.pop() y = elements.pop() pos = float(x) + float(y) * 1j if not absolute: pos += current_pos pen.lineTo((pos.real, pos.imag)) current_pos = pos elif command == "H": x = elements.pop() pos = float(x) + current_pos.imag * 1j if not absolute: pos += current_pos.real pen.lineTo((pos.real, pos.imag)) current_pos = pos elif command == "V": y = elements.pop() pos = current_pos.real + float(y) * 1j if not absolute: pos += current_pos.imag * 1j pen.lineTo((pos.real, pos.imag)) current_pos = pos elif command == "C": control1 = float(elements.pop()) + float(elements.pop()) * 1j control2 = float(elements.pop()) + float(elements.pop()) * 1j end = float(elements.pop()) + float(elements.pop()) * 1j if not absolute: control1 += current_pos control2 += current_pos end += current_pos pen.curveTo( (control1.real, control1.imag), (control2.real, control2.imag), (end.real, end.imag), ) current_pos = end last_control = control2 elif command == "S": # Smooth curve. First control point is the "reflection" of # the second control point in the previous path. 
if last_command not in "CS": # If there is no previous command or if the previous command # was not a C, c, S or s, assume the first control point is # coincident with the current point. control1 = current_pos else: # The first control point is assumed to be the reflection of # the second control point on the previous command relative # to the current point. control1 = current_pos + current_pos - last_control control2 = float(elements.pop()) + float(elements.pop()) * 1j end = float(elements.pop()) + float(elements.pop()) * 1j if not absolute: control2 += current_pos end += current_pos pen.curveTo( (control1.real, control1.imag), (control2.real, control2.imag), (end.real, end.imag), ) current_pos = end last_control = control2 elif command == "Q": control = float(elements.pop()) + float(elements.pop()) * 1j end = float(elements.pop()) + float(elements.pop()) * 1j if not absolute: control += current_pos end += current_pos pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) current_pos = end last_control = control elif command == "T": # Smooth curve. Control point is the "reflection" of # the second control point in the previous path. if last_command not in "QT": # If there is no previous command or if the previous command # was not a Q, q, T or t, assume the first control point is # coincident with the current point. control = current_pos else: # The control point is assumed to be the reflection of # the control point on the previous command relative # to the current point. control = current_pos + current_pos - last_control end = float(elements.pop()) + float(elements.pop()) * 1j if not absolute: end += current_pos pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) current_pos = end last_control = control elif command == "A": rx = abs(float(elements.pop())) ry = abs(float(elements.pop())) rotation = float(elements.pop()) arc_large = bool(int(elements.pop())) arc_sweep = bool(int(elements.pop())) end = float(elements.pop()) + float(elements.pop()) * 1j if not absolute: end += current_pos # if the pen supports arcs, pass the values unchanged, otherwise # approximate the arc with a series of cubic bezier curves if have_arcTo: pen.arcTo( rx, ry, rotation, arc_large, arc_sweep, (end.real, end.imag), ) else: arc = arc_class( current_pos, rx, ry, rotation, arc_large, arc_sweep, end ) arc.draw(pen) current_pos = end # no final Z command, it's an open path if start_pos is not None: pen.endPath()

# ==== fontTools/svgLib/path/shapes.py ====
import re def _prefer_non_zero(*args): for arg in args: if arg != 0: return arg return 0.0 def _ntos(n): # %f likes to add unnecessary 0's, %g isn't consistent about # decimals return ("%.3f" % n).rstrip("0").rstrip(".") def _strip_xml_ns(tag): # ElementTree API doesn't provide a way to ignore XML namespaces in tags # so we strip them ourselves here: cf. https://bugs.python.org/issue18304 return tag.split("}", 1)[1] if "}" in tag else tag def _transform(raw_value): # TODO assumes a 'matrix' transform. # No other transform functions are supported at the moment. # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform # start simple: if you aren't exactly matrix(...)
then no love match = re.match(r"matrix\((.*)\)", raw_value) if not match: raise NotImplementedError matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1))) if len(matrix) != 6: raise ValueError("wrong # of terms in %s" % raw_value) return matrix class PathBuilder(object): def __init__(self): self.paths = [] self.transforms = [] def _start_path(self, initial_path=""): self.paths.append(initial_path) self.transforms.append(None) def _end_path(self): self._add("z") def _add(self, path_snippet): path = self.paths[-1] if path: path += " " + path_snippet else: path = path_snippet self.paths[-1] = path def _move(self, c, x, y): self._add("%s%s,%s" % (c, _ntos(x), _ntos(y))) def M(self, x, y): self._move("M", x, y) def m(self, x, y): self._move("m", x, y) def _arc(self, c, rx, ry, x, y, large_arc): self._add( "%s%s,%s 0 %d 1 %s,%s" % (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y)) ) def A(self, rx, ry, x, y, large_arc=0): self._arc("A", rx, ry, x, y, large_arc) def a(self, rx, ry, x, y, large_arc=0): self._arc("a", rx, ry, x, y, large_arc) def _vhline(self, c, x): self._add("%s%s" % (c, _ntos(x))) def H(self, x): self._vhline("H", x) def h(self, x): self._vhline("h", x) def V(self, y): self._vhline("V", y) def v(self, y): self._vhline("v", y) def _line(self, c, x, y): self._add("%s%s,%s" % (c, _ntos(x), _ntos(y))) def L(self, x, y): self._line("L", x, y) def l(self, x, y): self._line("l", x, y) def _parse_line(self, line): x1 = float(line.attrib.get("x1", 0)) y1 = float(line.attrib.get("y1", 0)) x2 = float(line.attrib.get("x2", 0)) y2 = float(line.attrib.get("y2", 0)) self._start_path() self.M(x1, y1) self.L(x2, y2) def _parse_rect(self, rect): x = float(rect.attrib.get("x", 0)) y = float(rect.attrib.get("y", 0)) w = float(rect.attrib.get("width")) h = float(rect.attrib.get("height")) rx = float(rect.attrib.get("rx", 0)) ry = float(rect.attrib.get("ry", 0)) rx = _prefer_non_zero(rx, ry) ry = _prefer_non_zero(ry, rx) # TODO there are more rules for adjusting rx, ry self._start_path() self.M(x + rx, y) self.H(x + w - rx) if rx > 0: self.A(rx, ry, x + w, y + ry) self.V(y + h - ry) if rx > 0: self.A(rx, ry, x + w - rx, y + h) self.H(x + rx) if rx > 0: self.A(rx, ry, x, y + h - ry) self.V(y + ry) if rx > 0: self.A(rx, ry, x + rx, y) self._end_path() def _parse_path(self, path): if "d" in path.attrib: self._start_path(initial_path=path.attrib["d"]) def _parse_polygon(self, poly): if "points" in poly.attrib: self._start_path("M" + poly.attrib["points"]) self._end_path() def _parse_polyline(self, poly): if "points" in poly.attrib: self._start_path("M" + poly.attrib["points"]) def _parse_circle(self, circle): cx = float(circle.attrib.get("cx", 0)) cy = float(circle.attrib.get("cy", 0)) r = float(circle.attrib.get("r")) # arc doesn't seem to like being a complete shape, draw two halves self._start_path() self.M(cx - r, cy) self.A(r, r, cx + r, cy, large_arc=1) self.A(r, r, cx - r, cy, large_arc=1) def _parse_ellipse(self, ellipse): cx = float(ellipse.attrib.get("cx", 0)) cy = float(ellipse.attrib.get("cy", 0)) rx = float(ellipse.attrib.get("rx")) ry = float(ellipse.attrib.get("ry")) # arc doesn't seem to like being a complete shape, draw two halves self._start_path() self.M(cx - rx, cy) self.A(rx, ry, cx + rx, cy, large_arc=1) self.A(rx, ry, cx - rx, cy, large_arc=1) def add_path_from_element(self, el): tag = _strip_xml_ns(el.tag) parse_fn = getattr(self, "_parse_%s" % tag.lower(), None) if not callable(parse_fn): return False parse_fn(el) if "transform" in el.attrib: 
self.transforms[-1] = _transform(el.attrib["transform"]) return True

# ==== fontTools/t1Lib/__init__.py ====
"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts. Functions for reading and writing raw Type 1 data: read(path) reads any Type 1 font file, returns the raw data and a type indicator: 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed to by 'path'. Raises an error when the file does not contain valid Type 1 data. write(path, data, kind='OTHER', dohex=False) writes raw Type 1 data to the file pointed to by 'path'. 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. 'dohex' is a flag which determines whether the eexec encrypted part should be written as hexadecimal or binary, but only if kind is 'OTHER'. """ import fontTools from fontTools.misc import eexec from fontTools.misc.macCreatorType import getMacCreatorAndType from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes from fontTools.misc.psOperators import ( _type1_pre_eexec_order, _type1_fontinfo_order, _type1_post_eexec_order, ) from fontTools.encodings.StandardEncoding import StandardEncoding import os import re __author__ = "jvr" __version__ = "1.0b3" DEBUG = 0 try: try: from Carbon import Res except ImportError: import Res # MacPython < 2.2 except ImportError: haveMacSupport = 0 else: haveMacSupport = 1 class T1Error(Exception): pass class T1Font(object): """Type 1 font class. Uses a minimal interpreter that supports just about enough PS to parse Type 1 fonts. """ def __init__(self, path, encoding="ascii", kind=None): if kind is None: self.data, _ = read(path) elif kind == "LWFN": self.data = readLWFN(path) elif kind == "PFB": self.data = readPFB(path) elif kind == "OTHER": self.data = readOther(path) else: raise ValueError(kind) self.encoding = encoding def saveAs(self, path, type, dohex=False): write(path, self.getData(), type, dohex) def getData(self): if not hasattr(self, "data"): self.data = self.createData() return self.data def getGlyphSet(self): """Return a generic GlyphSet, which is a dict-like object mapping glyph names to glyph objects. The returned glyph objects have a .draw() method that supports the Pen protocol, and will have an attribute named 'width', but only *after* the .draw() method has been called. In the case of Type 1, the GlyphSet is simply the CharStrings dict.
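For example (illustrative; the file name and pen are assumed):

    font = T1Font("Example.pfa")
    glyphs = font.getGlyphSet()
    glyphs["A"].draw(pen)
    width = glyphs["A"].width  # available only after draw()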
""" return self["CharStrings"] def __getitem__(self, key): if not hasattr(self, "font"): self.parse() return self.font[key] def parse(self): from fontTools.misc import psLib from fontTools.misc import psCharStrings self.font = psLib.suckfont(self.data, self.encoding) charStrings = self.font["CharStrings"] lenIV = self.font["Private"].get("lenIV", 4) assert lenIV >= 0 subrs = self.font["Private"]["Subrs"] for glyphName, charString in charStrings.items(): charString, R = eexec.decrypt(charString, 4330) charStrings[glyphName] = psCharStrings.T1CharString( charString[lenIV:], subrs=subrs ) for i in range(len(subrs)): charString, R = eexec.decrypt(subrs[i], 4330) subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) del self.data def createData(self): sf = self.font eexec_began = False eexec_dict = {} lines = [] lines.extend( [ self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"), self._tobytes(f"%t1Font: ({fontTools.version})"), self._tobytes(f"%%BeginResource: font {sf['FontName']}"), ] ) # follow t1write.c:writeRegNameKeyedFont size = 3 # Headroom for new key addition size += 1 # FontMatrix is always counted size += 1 + 1 # Private, CharStings for key in font_dictionary_keys: size += int(key in sf) lines.append(self._tobytes(f"{size} dict dup begin")) for key, value in sf.items(): if eexec_began: eexec_dict[key] = value continue if key == "FontInfo": fi = sf["FontInfo"] # follow t1write.c:writeFontInfoDict size = 3 # Headroom for new key addition for subkey in FontInfo_dictionary_keys: size += int(subkey in fi) lines.append(self._tobytes(f"/FontInfo {size} dict dup begin")) for subkey, subvalue in fi.items(): lines.extend(self._make_lines(subkey, subvalue)) lines.append(b"end def") elif key in _type1_post_eexec_order: # usually 'Private' eexec_dict[key] = value eexec_began = True else: lines.extend(self._make_lines(key, value)) lines.append(b"end") eexec_portion = self.encode_eexec(eexec_dict) lines.append(bytesjoin([b"currentfile eexec ", eexec_portion])) for _ in range(8): lines.append(self._tobytes("0" * 64)) lines.extend([b"cleartomark", b"%%EndResource", b"%%EOF"]) data = bytesjoin(lines, "\n") return data def encode_eexec(self, eexec_dict): lines = [] # '-|', '|-', '|' RD_key, ND_key, NP_key = None, None, None lenIV = 4 subrs = std_subrs # Ensure we look at Private first, because we need RD_key, ND_key, NP_key and lenIV sortedItems = sorted(eexec_dict.items(), key=lambda item: item[0] != "Private") for key, value in sortedItems: if key == "Private": pr = eexec_dict["Private"] # follow t1write.c:writePrivateDict size = 3 # for RD, ND, NP for subkey in Private_dictionary_keys: size += int(subkey in pr) lines.append(b"dup /Private") lines.append(self._tobytes(f"{size} dict dup begin")) for subkey, subvalue in pr.items(): if not RD_key and subvalue == RD_value: RD_key = subkey elif not ND_key and subvalue in ND_values: ND_key = subkey elif not NP_key and subvalue in PD_values: NP_key = subkey if subkey == "lenIV": lenIV = subvalue if subkey == "OtherSubrs": # XXX: assert that no flex hint is used lines.append(self._tobytes(hintothers)) elif subkey == "Subrs": for subr_bin in subvalue: subr_bin.compile() subrs = [subr_bin.bytecode for subr_bin in subvalue] lines.append(f"/Subrs {len(subrs)} array".encode("ascii")) for i, subr_bin in enumerate(subrs): encrypted_subr, R = eexec.encrypt( bytesjoin([char_IV[:lenIV], subr_bin]), 4330 ) lines.append( bytesjoin( [ self._tobytes( f"dup {i} {len(encrypted_subr)} {RD_key} " ), encrypted_subr, self._tobytes(f" {NP_key}"), ] ) ) 
lines.append(b"def") lines.append(b"put") else: lines.extend(self._make_lines(subkey, subvalue)) elif key == "CharStrings": lines.append(b"dup /CharStrings") lines.append( self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin") ) for glyph_name, char_bin in eexec_dict["CharStrings"].items(): char_bin.compile() encrypted_char, R = eexec.encrypt( bytesjoin([char_IV[:lenIV], char_bin.bytecode]), 4330 ) lines.append( bytesjoin( [ self._tobytes( f"/{glyph_name} {len(encrypted_char)} {RD_key} " ), encrypted_char, self._tobytes(f" {ND_key}"), ] ) ) lines.append(b"end put") else: lines.extend(self._make_lines(key, value)) lines.extend( [ b"end", b"dup /FontName get exch definefont pop", b"mark", b"currentfile closefile\n", ] ) eexec_portion = bytesjoin(lines, "\n") encrypted_eexec, R = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665) return encrypted_eexec def _make_lines(self, key, value): if key == "FontName": return [self._tobytes(f"/{key} /{value} def")] if key in ["isFixedPitch", "ForceBold", "RndStemUp"]: return [self._tobytes(f"/{key} {'true' if value else 'false'} def")] elif key == "Encoding": if value == StandardEncoding: return [self._tobytes(f"/{key} StandardEncoding def")] else: # follow fontTools.misc.psOperators._type1_Encoding_repr lines = [] lines.append(b"/Encoding 256 array") lines.append(b"0 1 255 {1 index exch /.notdef put} for") for i in range(256): name = value[i] if name != ".notdef": lines.append(self._tobytes(f"dup {i} /{name} put")) lines.append(b"def") return lines if isinstance(value, str): return [self._tobytes(f"/{key} ({value}) def")] elif isinstance(value, bool): return [self._tobytes(f"/{key} {'true' if value else 'false'} def")] elif isinstance(value, list): return [self._tobytes(f"/{key} [{' '.join(str(v) for v in value)}] def")] elif isinstance(value, tuple): return [self._tobytes(f"/{key} {{{' '.join(str(v) for v in value)}}} def")] else: return [self._tobytes(f"/{key} {value} def")] def _tobytes(self, s, errors="strict"): return tobytes(s, self.encoding, errors) # low level T1 data read and write functions def read(path, onlyHeader=False): """reads any Type 1 font file, returns raw data""" _, ext = os.path.splitext(path) ext = ext.lower() creator, typ = getMacCreatorAndType(path) if typ == "LWFN": return readLWFN(path, onlyHeader), "LWFN" if ext == ".pfb": return readPFB(path, onlyHeader), "PFB" else: return readOther(path), "OTHER" def write(path, data, kind="OTHER", dohex=False): assertType1(data) kind = kind.upper() try: os.remove(path) except os.error: pass err = 1 try: if kind == "LWFN": writeLWFN(path, data) elif kind == "PFB": writePFB(path, data) else: writeOther(path, data, dohex) err = 0 finally: if err and not DEBUG: try: os.remove(path) except os.error: pass # -- internal -- LWFNCHUNKSIZE = 2000 HEXLINELENGTH = 80 def readLWFN(path, onlyHeader=False): """reads an LWFN font file, returns raw data""" from fontTools.misc.macRes import ResourceReader reader = ResourceReader(path) try: data = [] for res in reader.get("POST", []): code = byteord(res.data[0]) if byteord(res.data[1]) != 0: raise T1Error("corrupt LWFN file") if code in [1, 2]: if onlyHeader and code == 2: break data.append(res.data[2:]) elif code in [3, 5]: break elif code == 4: with open(path, "rb") as f: data.append(f.read()) elif code == 0: pass # comment, ignore else: raise T1Error("bad chunk code: " + repr(code)) finally: reader.close() data = bytesjoin(data) assertType1(data) return data def readPFB(path, onlyHeader=False): """reads a PFB font file, returns raw 
data""" data = [] with open(path, "rb") as f: while True: if f.read(1) != bytechr(128): raise T1Error("corrupt PFB file") code = byteord(f.read(1)) if code in [1, 2]: chunklen = stringToLong(f.read(4)) chunk = f.read(chunklen) assert len(chunk) == chunklen data.append(chunk) elif code == 3: break else: raise T1Error("bad chunk code: " + repr(code)) if onlyHeader: break data = bytesjoin(data) assertType1(data) return data def readOther(path): """reads any (font) file, returns raw data""" with open(path, "rb") as f: data = f.read() assertType1(data) chunks = findEncryptedChunks(data) data = [] for isEncrypted, chunk in chunks: if isEncrypted and isHex(chunk[:4]): data.append(deHexString(chunk)) else: data.append(chunk) return bytesjoin(data) # file writing tools def writeLWFN(path, data): # Res.FSpCreateResFile was deprecated in OS X 10.5 Res.FSpCreateResFile(path, "just", "LWFN", 0) resRef = Res.FSOpenResFile(path, 2) # write-only try: Res.UseResFile(resRef) resID = 501 chunks = findEncryptedChunks(data) for isEncrypted, chunk in chunks: if isEncrypted: code = 2 else: code = 1 while chunk: res = Res.Resource(bytechr(code) + "\0" + chunk[: LWFNCHUNKSIZE - 2]) res.AddResource("POST", resID, "") chunk = chunk[LWFNCHUNKSIZE - 2 :] resID = resID + 1 res = Res.Resource(bytechr(5) + "\0") res.AddResource("POST", resID, "") finally: Res.CloseResFile(resRef) def writePFB(path, data): chunks = findEncryptedChunks(data) with open(path, "wb") as f: for isEncrypted, chunk in chunks: if isEncrypted: code = 2 else: code = 1 f.write(bytechr(128) + bytechr(code)) f.write(longToString(len(chunk))) f.write(chunk) f.write(bytechr(128) + bytechr(3)) def writeOther(path, data, dohex=False): chunks = findEncryptedChunks(data) with open(path, "wb") as f: hexlinelen = HEXLINELENGTH // 2 for isEncrypted, chunk in chunks: if isEncrypted: code = 2 else: code = 1 if code == 2 and dohex: while chunk: f.write(eexec.hexString(chunk[:hexlinelen])) f.write(b"\r") chunk = chunk[hexlinelen:] else: f.write(chunk) # decryption tools EEXECBEGIN = b"currentfile eexec" # The spec allows for 512 ASCII zeros interrupted by arbitrary whitespace to # follow eexec EEXECEND = re.compile(b"(0[ \t\r\n]*){512}", flags=re.M) EEXECINTERNALEND = b"currentfile closefile" EEXECBEGINMARKER = b"%-- eexec start\r" EEXECENDMARKER = b"%-- eexec end\r" _ishexRE = re.compile(b"[0-9A-Fa-f]*$") def isHex(text): return _ishexRE.match(text) is not None def decryptType1(data): chunks = findEncryptedChunks(data) data = [] for isEncrypted, chunk in chunks: if isEncrypted: if isHex(chunk[:4]): chunk = deHexString(chunk) decrypted, R = eexec.decrypt(chunk, 55665) decrypted = decrypted[4:] if ( decrypted[-len(EEXECINTERNALEND) - 1 : -1] != EEXECINTERNALEND and decrypted[-len(EEXECINTERNALEND) - 2 : -2] != EEXECINTERNALEND ): raise T1Error("invalid end of eexec part") decrypted = decrypted[: -len(EEXECINTERNALEND) - 2] + b"\r" data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) else: if chunk[-len(EEXECBEGIN) - 1 : -1] == EEXECBEGIN: data.append(chunk[: -len(EEXECBEGIN) - 1]) else: data.append(chunk) return bytesjoin(data) def findEncryptedChunks(data): chunks = [] while True: eBegin = data.find(EEXECBEGIN) if eBegin < 0: break eBegin = eBegin + len(EEXECBEGIN) + 1 endMatch = EEXECEND.search(data, eBegin) if endMatch is None: raise T1Error("can't find end of eexec part") eEnd = endMatch.start() cypherText = data[eBegin : eEnd + 2] if isHex(cypherText[:4]): cypherText = deHexString(cypherText) plainText, R = eexec.decrypt(cypherText, 55665) eEndLocal = 
plainText.find(EEXECINTERNALEND) if eEndLocal < 0: raise T1Error("can't find end of eexec part") chunks.append((0, data[:eBegin])) chunks.append((1, cypherText[: eEndLocal + len(EEXECINTERNALEND) + 1])) data = data[eEnd:] chunks.append((0, data)) return chunks def deHexString(hexstring): return eexec.deHexString(bytesjoin(hexstring.split())) # Type 1 assertion _fontType1RE = re.compile(rb"/FontType\s+1\s+def") def assertType1(data): for head in [b"%!PS-AdobeFont", b"%!FontType1"]: if data[: len(head)] == head: break else: raise T1Error("not a PostScript font") if not _fontType1RE.search(data): raise T1Error("not a Type 1 font") if data.find(b"currentfile eexec") < 0: raise T1Error("not an encrypted Type 1 font") # XXX what else? return data # pfb helpers def longToString(long): s = b"" for i in range(4): s += bytechr((long & (0xFF << (i * 8))) >> i * 8) return s def stringToLong(s): if len(s) != 4: raise ValueError("string must be 4 bytes long") l = 0 for i in range(4): l += byteord(s[i]) << (i * 8) return l # PS stream helpers font_dictionary_keys = list(_type1_pre_eexec_order) # t1write.c:writeRegNameKeyedFont # always counts following keys font_dictionary_keys.remove("FontMatrix") FontInfo_dictionary_keys = list(_type1_fontinfo_order) # extend because AFDKO tx may use following keys FontInfo_dictionary_keys.extend( [ "FSType", "Copyright", ] ) Private_dictionary_keys = [ # We don't know what names will actually be used. # "RD", # "ND", # "NP", "Subrs", "OtherSubrs", "UniqueID", "BlueValues", "OtherBlues", "FamilyBlues", "FamilyOtherBlues", "BlueScale", "BlueShift", "BlueFuzz", "StdHW", "StdVW", "StemSnapH", "StemSnapV", "ForceBold", "LanguageGroup", "password", "lenIV", "MinFeature", "RndStemUp", ] # t1write_hintothers.h hintothers = """/OtherSubrs[{}{}{}{systemdict/internaldict known not{pop 3}{1183615869 systemdict/internaldict get exec dup/startlock known{/startlock get exec}{dup /strtlck known{/strtlck get exec}{pop 3}ifelse}ifelse}ifelse}executeonly]def""" # t1write.c:saveStdSubrs std_subrs = [ # 3 0 callother pop pop setcurrentpoint return b"\x8e\x8b\x0c\x10\x0c\x11\x0c\x11\x0c\x21\x0b", # 0 1 callother return b"\x8b\x8c\x0c\x10\x0b", # 0 2 callother return b"\x8b\x8d\x0c\x10\x0b", # return b"\x0b", # 3 1 3 callother pop callsubr return b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b", ] # follow t1write.c:writeRegNameKeyedFont eexec_IV = b"cccc" char_IV = b"\x0c\x0c\x0c\x0c" RD_value = ("string", "currentfile", "exch", "readstring", "pop") ND_values = [("def",), ("noaccess", "def")] PD_values = [("put",), ("noaccess", "put")]

# ==== fontTools/ttLib/__init__.py ====
"""fontTools.ttLib -- a package for dealing with TrueType fonts.""" from fontTools.misc.loggingTools import deprecateFunction import logging log = logging.getLogger(__name__) class TTLibError(Exception): pass class TTLibFileIsCollectionError(TTLibError): pass @deprecateFunction("use logging instead", category=DeprecationWarning) def debugmsg(msg): import time print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) from fontTools.ttLib.ttFont import * from fontTools.ttLib.ttCollection import TTCollection

# ==== fontTools/ttLib/__main__.py ====
import sys from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError from fontTools.ttLib.ttFont import * from fontTools.ttLib.ttCollection import TTCollection def main(args=None): """Open/save fonts with TTFont() or TTCollection() ./fonttools ttLib [-oFILE] [-yNUMBER] files...
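Example (illustrative): combine two fonts into a collection, then pull
the first one back out:

    fonttools ttLib -o Combined.ttc Font1.ttf Font2.ttf
    fonttools ttLib -y 0 -o First.ttf Combined.ttc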
    If multiple files are given on the command-line, they are each opened
    (as a font or collection), and added to the font list.

    If -o (output-file) argument is given, the font list is then saved
    to the output file, either as a single font, if there is only one
    font, or as a collection otherwise.

    If -y (font-number) argument is given, only the specified font
    from collections is opened.

    The above allows extracting a single font from a collection, or
    combining multiple fonts into a collection.

    If --lazy or --no-lazy are given, those are passed to the TTFont() or
    TTCollection() constructors.
    """
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools ttLib",
        description="Open/save fonts with TTFont() or TTCollection()",
        epilog="""
        If multiple files are given on the command-line, they are each opened
        (as a font or collection), and added to the font list.

        The above, when combined with -o / --output, allows for extracting a
        single font from a collection, or combining multiple fonts into a
        collection.
        """,
    )
    parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
    parser.add_argument(
        "-t", "--table", metavar="table", nargs="*", help="Tables to decompile."
    )
    parser.add_argument(
        "-o", "--output", metavar="FILE", default=None, help="Output file."
    )
    parser.add_argument(
        "-y", metavar="NUMBER", default=-1, help="Font number to load from collections."
    )
    parser.add_argument(
        "--lazy", action="store_true", default=None, help="Load fonts lazily."
    )
    parser.add_argument(
        "--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately."
    )
    parser.add_argument(
        "--flavor",
        dest="flavor",
        default=None,
        help="Flavor of output font. 'woff' or 'woff2'.",
    )
    options = parser.parse_args(args)

    fontNumber = int(options.y) if options.y is not None else None
    outFile = options.output
    lazy = options.lazy
    flavor = options.flavor
    tables = options.table if options.table is not None else []

    fonts = []
    for f in options.font:
        try:
            font = TTFont(f, fontNumber=fontNumber, lazy=lazy)
            fonts.append(font)
        except TTLibFileIsCollectionError:
            collection = TTCollection(f, lazy=lazy)
            fonts.extend(collection.fonts)

    for font in fonts:
        for table in tables if "*" not in tables else font.keys():
            font[table]  # Decompiles

    if outFile is not None:
        if len(fonts) == 1:
            fonts[0].flavor = flavor
            fonts[0].save(outFile)
        else:
            if flavor is not None:
                raise TTLibError("Cannot set flavor for collections.")
            collection = TTCollection()
            collection.fonts = fonts
            collection.save(outFile)


if __name__ == "__main__":
    sys.exit(main())


# ---- fontTools/ttLib/macUtils.py ----

"""ttLib.macUtils.py -- Various Mac-specific stuff."""

from io import BytesIO
from fontTools.misc.macRes import ResourceReader, ResourceError


def getSFNTResIndices(path):
    """Determine whether a file has a 'sfnt' resource fork or not."""
    try:
        reader = ResourceReader(path)
        indices = reader.getIndices("sfnt")
        reader.close()
        return indices
    except ResourceError:
        return []


def openTTFonts(path):
    """Given a pathname, return a list of TTFont objects. In the case of a
    flat TTF/OTF file, the list will contain just one font object; but in the
    case of a Mac font suitcase it will contain as many font objects as there
    are sfnt resources in the file.
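
    An illustrative sketch (``MyFont.ttf`` is a hypothetical path; the call
    only succeeds on files that actually exist):

    >>> fonts = openTTFonts("MyFont.ttf")  # doctest: +SKIP
    >>> len(fonts) >= 1                    # doctest: +SKIP
    True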
""" from fontTools import ttLib fonts = [] sfnts = getSFNTResIndices(path) if not sfnts: fonts.append(ttLib.TTFont(path)) else: for index in sfnts: fonts.append(ttLib.TTFont(path, index)) if not fonts: raise ttLib.TTLibError("no fonts found in file '%s'" % path) return fonts class SFNTResourceReader(BytesIO): """Simple read-only file wrapper for 'sfnt' resources.""" def __init__(self, path, res_name_or_index): from fontTools import ttLib reader = ResourceReader(path) if isinstance(res_name_or_index, str): rsrc = reader.getNamedResource("sfnt", res_name_or_index) else: rsrc = reader.getIndResource("sfnt", res_name_or_index) if rsrc is None: raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index) reader.close() self.rsrc = rsrc super(SFNTResourceReader, self).__init__(rsrc.data) self.name = path PKaZZZ�=���!fontTools/ttLib/removeOverlaps.py""" Simplify TrueType glyphs by merging overlapping contours/components. Requires https://github.com/fonttools/skia-pathops """ import itertools import logging from typing import Callable, Iterable, Optional, Mapping from fontTools.misc.roundTools import otRound from fontTools.ttLib import ttFont from fontTools.ttLib.tables import _g_l_y_f from fontTools.ttLib.tables import _h_m_t_x from fontTools.pens.ttGlyphPen import TTGlyphPen import pathops __all__ = ["removeOverlaps"] class RemoveOverlapsError(Exception): pass log = logging.getLogger("fontTools.ttLib.removeOverlaps") _TTGlyphMapping = Mapping[str, ttFont._TTGlyph] def skPathFromGlyph(glyphName: str, glyphSet: _TTGlyphMapping) -> pathops.Path: path = pathops.Path() pathPen = path.getPen(glyphSet=glyphSet) glyphSet[glyphName].draw(pathPen) return path def skPathFromGlyphComponent( component: _g_l_y_f.GlyphComponent, glyphSet: _TTGlyphMapping ): baseGlyphName, transformation = component.getComponentInfo() path = skPathFromGlyph(baseGlyphName, glyphSet) return path.transform(*transformation) def componentsOverlap(glyph: _g_l_y_f.Glyph, glyphSet: _TTGlyphMapping) -> bool: if not glyph.isComposite(): raise ValueError("This method only works with TrueType composite glyphs") if len(glyph.components) < 2: return False # single component, no overlaps component_paths = {} def _get_nth_component_path(index: int) -> pathops.Path: if index not in component_paths: component_paths[index] = skPathFromGlyphComponent( glyph.components[index], glyphSet ) return component_paths[index] return any( pathops.op( _get_nth_component_path(i), _get_nth_component_path(j), pathops.PathOp.INTERSECTION, fix_winding=False, keep_starting_points=False, ) for i, j in itertools.combinations(range(len(glyph.components)), 2) ) def ttfGlyphFromSkPath(path: pathops.Path) -> _g_l_y_f.Glyph: # Skia paths have no 'components', no need for glyphSet ttPen = TTGlyphPen(glyphSet=None) path.draw(ttPen) glyph = ttPen.glyph() assert not glyph.isComposite() # compute glyph.xMin (glyfTable parameter unused for non composites) glyph.recalcBounds(glyfTable=None) return glyph def _round_path( path: pathops.Path, round: Callable[[float], float] = otRound ) -> pathops.Path: rounded_path = pathops.Path() for verb, points in path: rounded_path.add(verb, *((round(p[0]), round(p[1])) for p in points)) return rounded_path def _simplify(path: pathops.Path, debugGlyphName: str) -> pathops.Path: # skia-pathops has a bug where it sometimes fails to simplify paths when there # are float coordinates and control points are very close to one another. # Rounding coordinates to integers works around the bug. 
    # Since we are going to round glyf coordinates later on anyway, here it is
    # ok(-ish) to also round before simplify. Better than failing the whole
    # process for the entire font.
    # https://bugs.chromium.org/p/skia/issues/detail?id=11958
    # https://github.com/google/fonts/issues/3365
    # TODO(anthrotype): remove once this Skia bug is fixed
    try:
        return pathops.simplify(path, clockwise=path.clockwise)
    except pathops.PathOpsError:
        pass

    path = _round_path(path)
    try:
        path = pathops.simplify(path, clockwise=path.clockwise)
        log.debug(
            "skia-pathops failed to simplify '%s' with float coordinates, "
            "but succeeded using rounded integer coordinates",
            debugGlyphName,
        )
        return path
    except pathops.PathOpsError as e:
        if log.isEnabledFor(logging.DEBUG):
            path.dump()
        raise RemoveOverlapsError(
            f"Failed to remove overlaps from glyph {debugGlyphName!r}"
        ) from e

    raise AssertionError("Unreachable")


def removeTTGlyphOverlaps(
    glyphName: str,
    glyphSet: _TTGlyphMapping,
    glyfTable: _g_l_y_f.table__g_l_y_f,
    hmtxTable: _h_m_t_x.table__h_m_t_x,
    removeHinting: bool = True,
) -> bool:
    glyph = glyfTable[glyphName]
    # decompose composite glyphs only if components overlap each other
    if (
        glyph.numberOfContours > 0
        or glyph.isComposite()
        and componentsOverlap(glyph, glyphSet)
    ):
        path = skPathFromGlyph(glyphName, glyphSet)

        # remove overlaps
        path2 = _simplify(path, glyphName)

        # replace TTGlyph if simplified path is different (ignoring contour order)
        if {tuple(c) for c in path.contours} != {tuple(c) for c in path2.contours}:
            glyfTable[glyphName] = glyph = ttfGlyphFromSkPath(path2)
            # simplified glyph is always unhinted
            assert not glyph.program
            # also ensure hmtx LSB == glyph.xMin so glyph origin is at x=0
            width, lsb = hmtxTable[glyphName]
            if lsb != glyph.xMin:
                hmtxTable[glyphName] = (width, glyph.xMin)
            return True

    if removeHinting:
        glyph.removeHinting()
    return False


def removeOverlaps(
    font: ttFont.TTFont,
    glyphNames: Optional[Iterable[str]] = None,
    removeHinting: bool = True,
    ignoreErrors=False,
) -> None:
    """Simplify glyphs in TTFont by merging overlapping contours.

    Overlapping components are first decomposed to simple contours, then merged.

    Currently this only works with TrueType fonts with 'glyf' table.
    Raises NotImplementedError if 'glyf' table is absent.

    Note that removing overlaps invalidates the hinting. By default we drop hinting
    from all glyphs whether or not overlaps are removed from a given one, as it
    would look weird if only some glyphs are left (un)hinted.

    Args:
        font: input TTFont object, modified in place.
        glyphNames: optional iterable of glyph names (str) to remove overlaps from.
            By default, all glyphs in the font are processed.
        removeHinting (bool): set to False to keep hinting for unmodified glyphs.
        ignoreErrors (bool): set to True to ignore errors while removing overlaps,
            thus keeping the tricky glyphs unchanged (fonttools/fonttools#2363).
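
    Example (an illustrative sketch; file names are hypothetical)::

        from fontTools.ttLib import TTFont

        font = TTFont("MyFont.ttf")
        removeOverlaps(font)  # merges overlapping contours, drops hinting
        font.save("MyFont-simplified.ttf")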
""" try: glyfTable = font["glyf"] except KeyError: raise NotImplementedError("removeOverlaps currently only works with TTFs") hmtxTable = font["hmtx"] # wraps the underlying glyf Glyphs, takes care of interfacing with drawing pens glyphSet = font.getGlyphSet() if glyphNames is None: glyphNames = font.getGlyphOrder() # process all simple glyphs first, then composites with increasing component depth, # so that by the time we test for component intersections the respective base glyphs # have already been simplified glyphNames = sorted( glyphNames, key=lambda name: ( ( glyfTable[name].getCompositeMaxpValues(glyfTable).maxComponentDepth if glyfTable[name].isComposite() else 0 ), name, ), ) modified = set() for glyphName in glyphNames: try: if removeTTGlyphOverlaps( glyphName, glyphSet, glyfTable, hmtxTable, removeHinting ): modified.add(glyphName) except RemoveOverlapsError: if not ignoreErrors: raise log.error("Failed to remove overlaps for '%s'", glyphName) log.debug("Removed overlaps for %s glyphs:\n%s", len(modified), " ".join(modified)) def main(args=None): import sys if args is None: args = sys.argv[1:] if len(args) < 2: print( f"usage: fonttools ttLib.removeOverlaps INPUT.ttf OUTPUT.ttf [GLYPHS ...]" ) sys.exit(1) src = args[0] dst = args[1] glyphNames = args[2:] or None with ttFont.TTFont(src) as f: removeOverlaps(f, glyphNames) f.save(dst) if __name__ == "__main__": main() PKaZZZ�C?h'' fontTools/ttLib/reorderGlyphs.py"""Reorder glyphs in a font.""" __author__ = "Rod Sheeter" # See https://docs.google.com/document/d/1h9O-C_ndods87uY0QeIIcgAMiX2gDTpvO_IhMJsKAqs/ # for details. from fontTools import ttLib from fontTools.ttLib.tables import otBase from fontTools.ttLib.tables import otTables as ot from abc import ABC, abstractmethod from dataclasses import dataclass from collections import deque from typing import ( Optional, Any, Callable, Deque, Iterable, List, NamedTuple, Tuple, Union, ) _COVERAGE_ATTR = "Coverage" # tables that have one coverage use this name def _sort_by_gid( get_glyph_id: Callable[[str], int], glyphs: List[str], parallel_list: Optional[List[Any]], ): if parallel_list: reordered = sorted( ((g, e) for g, e in zip(glyphs, parallel_list)), key=lambda t: get_glyph_id(t[0]), ) sorted_glyphs, sorted_parallel_list = map(list, zip(*reordered)) parallel_list[:] = sorted_parallel_list else: sorted_glyphs = sorted(glyphs, key=get_glyph_id) glyphs[:] = sorted_glyphs def _get_dotted_attr(value: Any, dotted_attr: str) -> Any: attr_names = dotted_attr.split(".") assert attr_names while attr_names: attr_name = attr_names.pop(0) value = getattr(value, attr_name) return value class ReorderRule(ABC): """A rule to reorder something in a font to match the fonts glyph order.""" @abstractmethod def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None: ... 
@dataclass(frozen=True) class ReorderCoverage(ReorderRule): """Reorder a Coverage table, and optionally a list that is sorted parallel to it.""" # A list that is parallel to Coverage parallel_list_attr: Optional[str] = None coverage_attr: str = _COVERAGE_ATTR def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None: coverage = _get_dotted_attr(value, self.coverage_attr) if type(coverage) is not list: # Normal path, process one coverage that might have a parallel list parallel_list = None if self.parallel_list_attr: parallel_list = _get_dotted_attr(value, self.parallel_list_attr) assert ( type(parallel_list) is list ), f"{self.parallel_list_attr} should be a list" assert len(parallel_list) == len(coverage.glyphs), "Nothing makes sense" _sort_by_gid(font.getGlyphID, coverage.glyphs, parallel_list) else: # A few tables have a list of coverage. No parallel list can exist. assert ( not self.parallel_list_attr ), f"Can't have multiple coverage AND a parallel list; {self}" for coverage_entry in coverage: _sort_by_gid(font.getGlyphID, coverage_entry.glyphs, None) @dataclass(frozen=True) class ReorderList(ReorderRule): """Reorder the items within a list to match the updated glyph order. Useful when a list ordered by coverage itself contains something ordered by a gid. For example, the PairSet table of https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable. """ list_attr: str key: str def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None: lst = _get_dotted_attr(value, self.list_attr) assert isinstance(lst, list), f"{self.list_attr} should be a list" lst.sort(key=lambda v: font.getGlyphID(getattr(v, self.key))) # (Type, Optional Format) => List[ReorderRule] # Encodes the relationships Cosimo identified _REORDER_RULES = { # GPOS (ot.SinglePos, 1): [ReorderCoverage()], (ot.SinglePos, 2): [ReorderCoverage(parallel_list_attr="Value")], (ot.PairPos, 1): [ReorderCoverage(parallel_list_attr="PairSet")], (ot.PairSet, None): [ReorderList("PairValueRecord", key="SecondGlyph")], (ot.PairPos, 2): [ReorderCoverage()], (ot.CursivePos, 1): [ReorderCoverage(parallel_list_attr="EntryExitRecord")], (ot.MarkBasePos, 1): [ ReorderCoverage( coverage_attr="MarkCoverage", parallel_list_attr="MarkArray.MarkRecord" ), ReorderCoverage( coverage_attr="BaseCoverage", parallel_list_attr="BaseArray.BaseRecord" ), ], (ot.MarkLigPos, 1): [ ReorderCoverage( coverage_attr="MarkCoverage", parallel_list_attr="MarkArray.MarkRecord" ), ReorderCoverage( coverage_attr="LigatureCoverage", parallel_list_attr="LigatureArray.LigatureAttach", ), ], (ot.MarkMarkPos, 1): [ ReorderCoverage( coverage_attr="Mark1Coverage", parallel_list_attr="Mark1Array.MarkRecord" ), ReorderCoverage( coverage_attr="Mark2Coverage", parallel_list_attr="Mark2Array.Mark2Record" ), ], (ot.ContextPos, 1): [ReorderCoverage(parallel_list_attr="PosRuleSet")], (ot.ContextPos, 2): [ReorderCoverage()], (ot.ContextPos, 3): [ReorderCoverage()], (ot.ChainContextPos, 1): [ReorderCoverage(parallel_list_attr="ChainPosRuleSet")], (ot.ChainContextPos, 2): [ReorderCoverage()], (ot.ChainContextPos, 3): [ ReorderCoverage(coverage_attr="BacktrackCoverage"), ReorderCoverage(coverage_attr="InputCoverage"), ReorderCoverage(coverage_attr="LookAheadCoverage"), ], # GSUB (ot.ContextSubst, 1): [ReorderCoverage(parallel_list_attr="SubRuleSet")], (ot.ContextSubst, 2): [ReorderCoverage()], (ot.ContextSubst, 3): [ReorderCoverage()], (ot.ChainContextSubst, 1): [ReorderCoverage(parallel_list_attr="ChainSubRuleSet")], 
(ot.ChainContextSubst, 2): [ReorderCoverage()], (ot.ChainContextSubst, 3): [ ReorderCoverage(coverage_attr="BacktrackCoverage"), ReorderCoverage(coverage_attr="InputCoverage"), ReorderCoverage(coverage_attr="LookAheadCoverage"), ], (ot.ReverseChainSingleSubst, 1): [ ReorderCoverage(parallel_list_attr="Substitute"), ReorderCoverage(coverage_attr="BacktrackCoverage"), ReorderCoverage(coverage_attr="LookAheadCoverage"), ], # GDEF (ot.AttachList, None): [ReorderCoverage(parallel_list_attr="AttachPoint")], (ot.LigCaretList, None): [ReorderCoverage(parallel_list_attr="LigGlyph")], (ot.MarkGlyphSetsDef, None): [ReorderCoverage()], # MATH (ot.MathGlyphInfo, None): [ReorderCoverage(coverage_attr="ExtendedShapeCoverage")], (ot.MathItalicsCorrectionInfo, None): [ ReorderCoverage(parallel_list_attr="ItalicsCorrection") ], (ot.MathTopAccentAttachment, None): [ ReorderCoverage( coverage_attr="TopAccentCoverage", parallel_list_attr="TopAccentAttachment" ) ], (ot.MathKernInfo, None): [ ReorderCoverage( coverage_attr="MathKernCoverage", parallel_list_attr="MathKernInfoRecords" ) ], (ot.MathVariants, None): [ ReorderCoverage( coverage_attr="VertGlyphCoverage", parallel_list_attr="VertGlyphConstruction", ), ReorderCoverage( coverage_attr="HorizGlyphCoverage", parallel_list_attr="HorizGlyphConstruction", ), ], } # TODO Port to otTraverse SubTablePath = Tuple[otBase.BaseTable.SubTableEntry, ...] def _bfs_base_table( root: otBase.BaseTable, root_accessor: str ) -> Iterable[SubTablePath]: yield from _traverse_ot_data( root, root_accessor, lambda frontier, new: frontier.extend(new) ) # Given f(current frontier, new entries) add new entries to frontier AddToFrontierFn = Callable[[Deque[SubTablePath], List[SubTablePath]], None] def _traverse_ot_data( root: otBase.BaseTable, root_accessor: str, add_to_frontier_fn: AddToFrontierFn ) -> Iterable[SubTablePath]: # no visited because general otData is forward-offset only and thus cannot cycle frontier: Deque[SubTablePath] = deque() frontier.append((otBase.BaseTable.SubTableEntry(root_accessor, root),)) while frontier: # path is (value, attr_name) tuples. attr_name is attr of parent to get value path = frontier.popleft() current = path[-1].value yield path new_entries = [] for subtable_entry in current.iterSubTables(): new_entries.append(path + (subtable_entry,)) add_to_frontier_fn(frontier, new_entries) def reorderGlyphs(font: ttLib.TTFont, new_glyph_order: List[str]): old_glyph_order = font.getGlyphOrder() if len(new_glyph_order) != len(old_glyph_order): raise ValueError( f"New glyph order contains {len(new_glyph_order)} glyphs, " f"but font has {len(old_glyph_order)} glyphs" ) if set(old_glyph_order) != set(new_glyph_order): raise ValueError( "New glyph order does not contain the same set of glyphs as the font:\n" f"* only in new: {set(new_glyph_order) - set(old_glyph_order)}\n" f"* only in old: {set(old_glyph_order) - set(new_glyph_order)}" ) # Changing the order of glyphs in a TTFont requires that all tables that use # glyph indexes have been fully. # Cf. 
https://github.com/fonttools/fonttools/issues/2060 font.ensureDecompiled() not_loaded = sorted(t for t in font.keys() if not font.isLoaded(t)) if not_loaded: raise ValueError(f"Everything should be loaded, following aren't: {not_loaded}") font.setGlyphOrder(new_glyph_order) coverage_containers = {"GDEF", "GPOS", "GSUB", "MATH"} for tag in coverage_containers: if tag in font.keys(): for path in _bfs_base_table(font[tag].table, f'font["{tag}"]'): value = path[-1].value reorder_key = (type(value), getattr(value, "Format", None)) for reorder in _REORDER_RULES.get(reorder_key, []): reorder.apply(font, value) PKaZZZ}S�3�/�/fontTools/ttLib/scaleUpem.py"""Change the units-per-EM of a font. AAT and Graphite tables are not supported. CFF/CFF2 fonts are de-subroutinized.""" from fontTools.ttLib.ttVisitor import TTVisitor import fontTools.ttLib as ttLib import fontTools.ttLib.tables.otBase as otBase import fontTools.ttLib.tables.otTables as otTables from fontTools.cffLib import VarStoreData import fontTools.cffLib.specializer as cffSpecializer from fontTools.varLib import builder # for VarData.calculateNumShorts from fontTools.misc.fixedTools import otRound from fontTools.ttLib.tables._g_l_y_f import VarComponentFlags __all__ = ["scale_upem", "ScalerVisitor"] class ScalerVisitor(TTVisitor): def __init__(self, scaleFactor): self.scaleFactor = scaleFactor def scale(self, v): return otRound(v * self.scaleFactor) @ScalerVisitor.register_attrs( ( (ttLib.getTableClass("head"), ("unitsPerEm", "xMin", "yMin", "xMax", "yMax")), (ttLib.getTableClass("post"), ("underlinePosition", "underlineThickness")), (ttLib.getTableClass("VORG"), ("defaultVertOriginY")), ( ttLib.getTableClass("hhea"), ( "ascent", "descent", "lineGap", "advanceWidthMax", "minLeftSideBearing", "minRightSideBearing", "xMaxExtent", "caretOffset", ), ), ( ttLib.getTableClass("vhea"), ( "ascent", "descent", "lineGap", "advanceHeightMax", "minTopSideBearing", "minBottomSideBearing", "yMaxExtent", "caretOffset", ), ), ( ttLib.getTableClass("OS/2"), ( "xAvgCharWidth", "ySubscriptXSize", "ySubscriptYSize", "ySubscriptXOffset", "ySubscriptYOffset", "ySuperscriptXSize", "ySuperscriptYSize", "ySuperscriptXOffset", "ySuperscriptYOffset", "yStrikeoutSize", "yStrikeoutPosition", "sTypoAscender", "sTypoDescender", "sTypoLineGap", "usWinAscent", "usWinDescent", "sxHeight", "sCapHeight", ), ), ( otTables.ValueRecord, ("XAdvance", "YAdvance", "XPlacement", "YPlacement"), ), # GPOS (otTables.Anchor, ("XCoordinate", "YCoordinate")), # GPOS (otTables.CaretValue, ("Coordinate")), # GDEF (otTables.BaseCoord, ("Coordinate")), # BASE (otTables.MathValueRecord, ("Value")), # MATH (otTables.ClipBox, ("xMin", "yMin", "xMax", "yMax")), # COLR ) ) def visit(visitor, obj, attr, value): setattr(obj, attr, visitor.scale(value)) @ScalerVisitor.register_attr( (ttLib.getTableClass("hmtx"), ttLib.getTableClass("vmtx")), "metrics" ) def visit(visitor, obj, attr, metrics): for g in metrics: advance, lsb = metrics[g] metrics[g] = visitor.scale(advance), visitor.scale(lsb) @ScalerVisitor.register_attr(ttLib.getTableClass("VMTX"), "VOriginRecords") def visit(visitor, obj, attr, VOriginRecords): for g in VOriginRecords: VOriginRecords[g] = visitor.scale(VOriginRecords[g]) @ScalerVisitor.register_attr(ttLib.getTableClass("glyf"), "glyphs") def visit(visitor, obj, attr, glyphs): for g in glyphs.values(): for attr in ("xMin", "xMax", "yMin", "yMax"): v = getattr(g, attr, None) if v is not None: setattr(g, attr, visitor.scale(v)) if g.isComposite(): for component in g.components: 
component.x = visitor.scale(component.x) component.y = visitor.scale(component.y) continue if g.isVarComposite(): for component in g.components: for attr in ("translateX", "translateY", "tCenterX", "tCenterY"): v = getattr(component.transform, attr) setattr(component.transform, attr, visitor.scale(v)) continue if hasattr(g, "coordinates"): coordinates = g.coordinates for i, (x, y) in enumerate(coordinates): coordinates[i] = visitor.scale(x), visitor.scale(y) @ScalerVisitor.register_attr(ttLib.getTableClass("gvar"), "variations") def visit(visitor, obj, attr, variations): # VarComposites are a pain to handle :-( glyfTable = visitor.font["glyf"] for glyphName, varlist in variations.items(): glyph = glyfTable[glyphName] isVarComposite = glyph.isVarComposite() for var in varlist: coordinates = var.coordinates if not isVarComposite: for i, xy in enumerate(coordinates): if xy is None: continue coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1]) continue # VarComposite glyph i = 0 for component in glyph.components: if component.flags & VarComponentFlags.AXES_HAVE_VARIATION: i += len(component.location) if component.flags & ( VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y ): xy = coordinates[i] coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1]) i += 1 if component.flags & VarComponentFlags.HAVE_ROTATION: i += 1 if component.flags & ( VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y ): i += 1 if component.flags & ( VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y ): i += 1 if component.flags & ( VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y ): xy = coordinates[i] coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1]) i += 1 # Phantom points assert i + 4 == len(coordinates) for i in range(i, len(coordinates)): xy = coordinates[i] coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1]) @ScalerVisitor.register_attr(ttLib.getTableClass("kern"), "kernTables") def visit(visitor, obj, attr, kernTables): for table in kernTables: kernTable = table.kernTable for k in kernTable.keys(): kernTable[k] = visitor.scale(kernTable[k]) def _cff_scale(visitor, args): for i, arg in enumerate(args): if not isinstance(arg, list): if not isinstance(arg, bytes): args[i] = visitor.scale(arg) else: num_blends = arg[-1] _cff_scale(visitor, arg) arg[-1] = num_blends @ScalerVisitor.register_attr( (ttLib.getTableClass("CFF "), ttLib.getTableClass("CFF2")), "cff" ) def visit(visitor, obj, attr, cff): cff.desubroutinize() topDict = cff.topDictIndex[0] varStore = getattr(topDict, "VarStore", None) getNumRegions = varStore.getNumRegions if varStore is not None else None privates = set() for fontname in cff.keys(): font = cff[fontname] cs = font.CharStrings for g in font.charset: c, _ = cs.getItemAndSelector(g) privates.add(c.private) commands = cffSpecializer.programToCommands( c.program, getNumRegions=getNumRegions ) for op, args in commands: if op == "vsindex": continue _cff_scale(visitor, args) c.program[:] = cffSpecializer.commandsToProgram(commands) # Annoying business of scaling numbers that do not matter whatsoever for attr in ( "UnderlinePosition", "UnderlineThickness", "FontBBox", "StrokeWidth", ): value = getattr(topDict, attr, None) if value is None: continue if isinstance(value, list): _cff_scale(visitor, value) else: setattr(topDict, attr, visitor.scale(value)) for i in range(6): topDict.FontMatrix[i] /= visitor.scaleFactor for private in privates: for attr in ( "BlueValues", "OtherBlues", "FamilyBlues", 
"FamilyOtherBlues", # "BlueScale", # "BlueShift", # "BlueFuzz", "StdHW", "StdVW", "StemSnapH", "StemSnapV", "defaultWidthX", "nominalWidthX", ): value = getattr(private, attr, None) if value is None: continue if isinstance(value, list): _cff_scale(visitor, value) else: setattr(private, attr, visitor.scale(value)) # ItemVariationStore @ScalerVisitor.register(otTables.VarData) def visit(visitor, varData): for item in varData.Item: for i, v in enumerate(item): item[i] = visitor.scale(v) varData.calculateNumShorts() # COLRv1 def _setup_scale_paint(paint, scale): if -2 <= scale <= 2 - (1 >> 14): paint.Format = otTables.PaintFormat.PaintScaleUniform paint.scale = scale return transform = otTables.Affine2x3() transform.populateDefaults() transform.xy = transform.yx = transform.dx = transform.dy = 0 transform.xx = transform.yy = scale paint.Format = otTables.PaintFormat.PaintTransform paint.Transform = transform @ScalerVisitor.register(otTables.BaseGlyphPaintRecord) def visit(visitor, record): oldPaint = record.Paint scale = otTables.Paint() _setup_scale_paint(scale, visitor.scaleFactor) scale.Paint = oldPaint record.Paint = scale return True @ScalerVisitor.register(otTables.Paint) def visit(visitor, paint): if paint.Format != otTables.PaintFormat.PaintGlyph: return True newPaint = otTables.Paint() newPaint.Format = paint.Format newPaint.Paint = paint.Paint newPaint.Glyph = paint.Glyph del paint.Paint del paint.Glyph _setup_scale_paint(paint, 1 / visitor.scaleFactor) paint.Paint = newPaint visitor.visit(newPaint.Paint) return False def scale_upem(font, new_upem): """Change the units-per-EM of font to the new value.""" upem = font["head"].unitsPerEm visitor = ScalerVisitor(new_upem / upem) visitor.visit(font) def main(args=None): """Change the units-per-EM of fonts""" if args is None: import sys args = sys.argv[1:] from fontTools.ttLib import TTFont from fontTools.misc.cliTools import makeOutputFileName import argparse parser = argparse.ArgumentParser( "fonttools ttLib.scaleUpem", description="Change the units-per-EM of fonts" ) parser.add_argument("font", metavar="font", help="Font file.") parser.add_argument( "new_upem", metavar="new-upem", help="New units-per-EM integer value." ) parser.add_argument( "--output-file", metavar="path", default=None, help="Output file." ) options = parser.parse_args(args) font = TTFont(options.font) new_upem = int(options.new_upem) output_file = ( options.output_file if options.output_file is not None else makeOutputFileName(options.font, overWrite=True, suffix="-scaled") ) scale_upem(font, new_upem) print("Writing %s" % output_file) font.save(output_file) if __name__ == "__main__": import sys sys.exit(main()) PKaZZZ��c�-Y-YfontTools/ttLib/sfnt.py"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format. Defines two public classes: SFNTReader SFNTWriter (Normally you don't have to use these classes explicitly; they are used automatically by ttLib.TTFont.) The reading and writing of sfnt files is separated in two distinct classes, since whenever the number of tables changes or whenever a table's length changes you need to rewrite the whole file anyway. 
""" from io import BytesIO from types import SimpleNamespace from fontTools.misc.textTools import Tag from fontTools.misc import sstruct from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError import struct from collections import OrderedDict import logging log = logging.getLogger(__name__) class SFNTReader(object): def __new__(cls, *args, **kwargs): """Return an instance of the SFNTReader sub-class which is compatible with the input file type. """ if args and cls is SFNTReader: infile = args[0] infile.seek(0) sfntVersion = Tag(infile.read(4)) infile.seek(0) if sfntVersion == "wOF2": # return new WOFF2Reader object from fontTools.ttLib.woff2 import WOFF2Reader return object.__new__(WOFF2Reader) # return default object return object.__new__(cls) def __init__(self, file, checkChecksums=0, fontNumber=-1): self.file = file self.checkChecksums = checkChecksums self.flavor = None self.flavorData = None self.DirectoryEntry = SFNTDirectoryEntry self.file.seek(0) self.sfntVersion = self.file.read(4) self.file.seek(0) if self.sfntVersion == b"ttcf": header = readTTCHeader(self.file) numFonts = header.numFonts if not 0 <= fontNumber < numFonts: raise TTLibFileIsCollectionError( "specify a font number between 0 and %d (inclusive)" % (numFonts - 1) ) self.numFonts = numFonts self.file.seek(header.offsetTable[fontNumber]) data = self.file.read(sfntDirectorySize) if len(data) != sfntDirectorySize: raise TTLibError("Not a Font Collection (not enough data)") sstruct.unpack(sfntDirectoryFormat, data, self) elif self.sfntVersion == b"wOFF": self.flavor = "woff" self.DirectoryEntry = WOFFDirectoryEntry data = self.file.read(woffDirectorySize) if len(data) != woffDirectorySize: raise TTLibError("Not a WOFF font (not enough data)") sstruct.unpack(woffDirectoryFormat, data, self) else: data = self.file.read(sfntDirectorySize) if len(data) != sfntDirectorySize: raise TTLibError("Not a TrueType or OpenType font (not enough data)") sstruct.unpack(sfntDirectoryFormat, data, self) self.sfntVersion = Tag(self.sfntVersion) if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") tables = {} for i in range(self.numTables): entry = self.DirectoryEntry() entry.fromFile(self.file) tag = Tag(entry.tag) tables[tag] = entry self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset)) # Load flavor data if any if self.flavor == "woff": self.flavorData = WOFFFlavorData(self) def has_key(self, tag): return tag in self.tables __contains__ = has_key def keys(self): return self.tables.keys() def __getitem__(self, tag): """Fetch the raw table data.""" entry = self.tables[Tag(tag)] data = entry.loadData(self.file) if self.checkChecksums: if tag == "head": # Beh: we have to special-case the 'head' table. checksum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:]) else: checksum = calcChecksum(data) if self.checkChecksums > 1: # Be obnoxious, and barf when it's wrong assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag elif checksum != entry.checkSum: # Be friendly, and just log a warning. log.warning("bad checksum for '%s' table", tag) return data def __delitem__(self, tag): del self.tables[Tag(tag)] def close(self): self.file.close() # We define custom __getstate__ and __setstate__ to make SFNTReader pickle-able # and deepcopy-able. When a TTFont is loaded as lazy=True, SFNTReader holds a # reference to an external file object which is not pickleable. 
So in __getstate__ # we store the file name and current position, and in __setstate__ we reopen the # same named file after unpickling. def __getstate__(self): if isinstance(self.file, BytesIO): # BytesIO is already pickleable, return the state unmodified return self.__dict__ # remove unpickleable file attribute, and only store its name and pos state = self.__dict__.copy() del state["file"] state["_filename"] = self.file.name state["_filepos"] = self.file.tell() return state def __setstate__(self, state): if "file" not in state: self.file = open(state.pop("_filename"), "rb") self.file.seek(state.pop("_filepos")) self.__dict__.update(state) # default compression level for WOFF 1.0 tables and metadata ZLIB_COMPRESSION_LEVEL = 6 # if set to True, use zopfli instead of zlib for compressing WOFF 1.0. # The Python bindings are available at https://pypi.python.org/pypi/zopfli USE_ZOPFLI = False # mapping between zlib's compression levels and zopfli's 'numiterations'. # Use lower values for files over several MB in size or it will be too slow ZOPFLI_LEVELS = { # 0: 0, # can't do 0 iterations... 1: 1, 2: 3, 3: 5, 4: 8, 5: 10, 6: 15, 7: 25, 8: 50, 9: 100, } def compress(data, level=ZLIB_COMPRESSION_LEVEL): """Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True, zopfli is used instead of the zlib module. The compression 'level' must be between 0 and 9. 1 gives best speed, 9 gives best compression (0 gives no compression at all). The default value is a compromise between speed and compression (6). """ if not (0 <= level <= 9): raise ValueError("Bad compression level: %s" % level) if not USE_ZOPFLI or level == 0: from zlib import compress return compress(data, level) else: from zopfli.zlib import compress return compress(data, numiterations=ZOPFLI_LEVELS[level]) class SFNTWriter(object): def __new__(cls, *args, **kwargs): """Return an instance of the SFNTWriter sub-class which is compatible with the specified 'flavor'. """ flavor = None if kwargs and "flavor" in kwargs: flavor = kwargs["flavor"] elif args and len(args) > 3: flavor = args[3] if cls is SFNTWriter: if flavor == "woff2": # return new WOFF2Writer object from fontTools.ttLib.woff2 import WOFF2Writer return object.__new__(WOFF2Writer) # return default object return object.__new__(cls) def __init__( self, file, numTables, sfntVersion="\000\001\000\000", flavor=None, flavorData=None, ): self.file = file self.numTables = numTables self.sfntVersion = Tag(sfntVersion) self.flavor = flavor self.flavorData = flavorData if self.flavor == "woff": self.directoryFormat = woffDirectoryFormat self.directorySize = woffDirectorySize self.DirectoryEntry = WOFFDirectoryEntry self.signature = "wOFF" # to calculate WOFF checksum adjustment, we also need the original SFNT offsets self.origNextTableOffset = ( sfntDirectorySize + numTables * sfntDirectoryEntrySize ) else: assert not self.flavor, "Unknown flavor '%s'" % self.flavor self.directoryFormat = sfntDirectoryFormat self.directorySize = sfntDirectorySize self.DirectoryEntry = SFNTDirectoryEntry from fontTools.ttLib import getSearchRange self.searchRange, self.entrySelector, self.rangeShift = getSearchRange( numTables, 16 ) self.directoryOffset = self.file.tell() self.nextTableOffset = ( self.directoryOffset + self.directorySize + numTables * self.DirectoryEntry.formatSize ) # clear out directory area self.file.seek(self.nextTableOffset) # make sure we're actually where we want to be. 
(old cStringIO bug) self.file.write(b"\0" * (self.nextTableOffset - self.file.tell())) self.tables = OrderedDict() def setEntry(self, tag, entry): if tag in self.tables: raise TTLibError("cannot rewrite '%s' table" % tag) self.tables[tag] = entry def __setitem__(self, tag, data): """Write raw table data to disk.""" if tag in self.tables: raise TTLibError("cannot rewrite '%s' table" % tag) entry = self.DirectoryEntry() entry.tag = tag entry.offset = self.nextTableOffset if tag == "head": entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:]) self.headTable = data entry.uncompressed = True else: entry.checkSum = calcChecksum(data) entry.saveData(self.file, data) if self.flavor == "woff": entry.origOffset = self.origNextTableOffset self.origNextTableOffset += (entry.origLength + 3) & ~3 self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3) # Add NUL bytes to pad the table data to a 4-byte boundary. # Don't depend on f.seek() as we need to add the padding even if no # subsequent write follows (seek is lazy), ie. after the final table # in the font. self.file.write(b"\0" * (self.nextTableOffset - self.file.tell())) assert self.nextTableOffset == self.file.tell() self.setEntry(tag, entry) def __getitem__(self, tag): return self.tables[tag] def close(self): """All tables must have been written to disk. Now write the directory. """ tables = sorted(self.tables.items()) if len(tables) != self.numTables: raise TTLibError( "wrong number of tables; expected %d, found %d" % (self.numTables, len(tables)) ) if self.flavor == "woff": self.signature = b"wOFF" self.reserved = 0 self.totalSfntSize = 12 self.totalSfntSize += 16 * len(tables) for tag, entry in tables: self.totalSfntSize += (entry.origLength + 3) & ~3 data = self.flavorData if self.flavorData else WOFFFlavorData() if data.majorVersion is not None and data.minorVersion is not None: self.majorVersion = data.majorVersion self.minorVersion = data.minorVersion else: if hasattr(self, "headTable"): self.majorVersion, self.minorVersion = struct.unpack( ">HH", self.headTable[4:8] ) else: self.majorVersion = self.minorVersion = 0 if data.metaData: self.metaOrigLength = len(data.metaData) self.file.seek(0, 2) self.metaOffset = self.file.tell() compressedMetaData = compress(data.metaData) self.metaLength = len(compressedMetaData) self.file.write(compressedMetaData) else: self.metaOffset = self.metaLength = self.metaOrigLength = 0 if data.privData: self.file.seek(0, 2) off = self.file.tell() paddedOff = (off + 3) & ~3 self.file.write(b"\0" * (paddedOff - off)) self.privOffset = self.file.tell() self.privLength = len(data.privData) self.file.write(data.privData) else: self.privOffset = self.privLength = 0 self.file.seek(0, 2) self.length = self.file.tell() else: assert not self.flavor, "Unknown flavor '%s'" % self.flavor pass directory = sstruct.pack(self.directoryFormat, self) self.file.seek(self.directoryOffset + self.directorySize) seenHead = 0 for tag, entry in tables: if tag == "head": seenHead = 1 directory = directory + entry.toString() if seenHead: self.writeMasterChecksum(directory) self.file.seek(self.directoryOffset) self.file.write(directory) def _calcMasterChecksum(self, directory): # calculate checkSumAdjustment tags = list(self.tables.keys()) checksums = [] for i in range(len(tags)): checksums.append(self.tables[tags[i]].checkSum) if self.DirectoryEntry != SFNTDirectoryEntry: # Create a SFNT directory for checksum calculation purposes from fontTools.ttLib import getSearchRange self.searchRange, self.entrySelector, 
self.rangeShift = getSearchRange( self.numTables, 16 ) directory = sstruct.pack(sfntDirectoryFormat, self) tables = sorted(self.tables.items()) for tag, entry in tables: sfntEntry = SFNTDirectoryEntry() sfntEntry.tag = entry.tag sfntEntry.checkSum = entry.checkSum sfntEntry.offset = entry.origOffset sfntEntry.length = entry.origLength directory = directory + sfntEntry.toString() directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize assert directory_end == len(directory) checksums.append(calcChecksum(directory)) checksum = sum(checksums) & 0xFFFFFFFF # BiboAfba! checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF return checksumadjustment def writeMasterChecksum(self, directory): checksumadjustment = self._calcMasterChecksum(directory) # write the checksum to the file self.file.seek(self.tables["head"].offset + 8) self.file.write(struct.pack(">L", checksumadjustment)) def reordersTables(self): return False # -- sfnt directory helpers and cruft ttcHeaderFormat = """ > # big endian TTCTag: 4s # "ttcf" Version: L # 0x00010000 or 0x00020000 numFonts: L # number of fonts # OffsetTable[numFonts]: L # array with offsets from beginning of file # ulDsigTag: L # version 2.0 only # ulDsigLength: L # version 2.0 only # ulDsigOffset: L # version 2.0 only """ ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat) sfntDirectoryFormat = """ > # big endian sfntVersion: 4s numTables: H # number of tables searchRange: H # (max2 <= numTables)*16 entrySelector: H # log2(max2 <= numTables) rangeShift: H # numTables*16-searchRange """ sfntDirectorySize = sstruct.calcsize(sfntDirectoryFormat) sfntDirectoryEntryFormat = """ > # big endian tag: 4s checkSum: L offset: L length: L """ sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat) woffDirectoryFormat = """ > # big endian signature: 4s # "wOFF" sfntVersion: 4s length: L # total woff file size numTables: H # number of tables reserved: H # set to 0 totalSfntSize: L # uncompressed size majorVersion: H # major version of WOFF file minorVersion: H # minor version of WOFF file metaOffset: L # offset to metadata block metaLength: L # length of compressed metadata metaOrigLength: L # length of uncompressed metadata privOffset: L # offset to private data block privLength: L # length of private data block """ woffDirectorySize = sstruct.calcsize(woffDirectoryFormat) woffDirectoryEntryFormat = """ > # big endian tag: 4s offset: L length: L # compressed length origLength: L # original length checkSum: L # original checksum """ woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat) class DirectoryEntry(object): def __init__(self): self.uncompressed = False # if True, always embed entry raw def fromFile(self, file): sstruct.unpack(self.format, file.read(self.formatSize), self) def fromString(self, str): sstruct.unpack(self.format, str, self) def toString(self): return sstruct.pack(self.format, self) def __repr__(self): if hasattr(self, "tag"): return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self)) else: return "<%s at %x>" % (self.__class__.__name__, id(self)) def loadData(self, file): file.seek(self.offset) data = file.read(self.length) assert len(data) == self.length if hasattr(self.__class__, "decodeData"): data = self.decodeData(data) return data def saveData(self, file, data): if hasattr(self.__class__, "encodeData"): data = self.encodeData(data) self.length = len(data) file.seek(self.offset) file.write(data) def decodeData(self, rawData): return rawData def encodeData(self, data): return data class 
SFNTDirectoryEntry(DirectoryEntry): format = sfntDirectoryEntryFormat formatSize = sfntDirectoryEntrySize class WOFFDirectoryEntry(DirectoryEntry): format = woffDirectoryEntryFormat formatSize = woffDirectoryEntrySize def __init__(self): super(WOFFDirectoryEntry, self).__init__() # With fonttools<=3.1.2, the only way to set a different zlib # compression level for WOFF directory entries was to set the class # attribute 'zlibCompressionLevel'. This is now replaced by a globally # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when # compressing the metadata. For backward compatibility, we still # use the class attribute if it was already set. if not hasattr(WOFFDirectoryEntry, "zlibCompressionLevel"): self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL def decodeData(self, rawData): import zlib if self.length == self.origLength: data = rawData else: assert self.length < self.origLength data = zlib.decompress(rawData) assert len(data) == self.origLength return data def encodeData(self, data): self.origLength = len(data) if not self.uncompressed: compressedData = compress(data, self.zlibCompressionLevel) if self.uncompressed or len(compressedData) >= self.origLength: # Encode uncompressed rawData = data self.length = self.origLength else: rawData = compressedData self.length = len(rawData) return rawData class WOFFFlavorData: Flavor = "woff" def __init__(self, reader=None): self.majorVersion = None self.minorVersion = None self.metaData = None self.privData = None if reader: self.majorVersion = reader.majorVersion self.minorVersion = reader.minorVersion if reader.metaLength: reader.file.seek(reader.metaOffset) rawData = reader.file.read(reader.metaLength) assert len(rawData) == reader.metaLength data = self._decompress(rawData) assert len(data) == reader.metaOrigLength self.metaData = data if reader.privLength: reader.file.seek(reader.privOffset) data = reader.file.read(reader.privLength) assert len(data) == reader.privLength self.privData = data def _decompress(self, rawData): import zlib return zlib.decompress(rawData) def calcChecksum(data): """Calculate the checksum for an arbitrary block of data. If the data length is not a multiple of four, it assumes it is to be padded with null byte. 
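
    The checksum is a plain 32-bit sum of big-endian longs, so (after padding)
    it is additive over 4-byte-aligned chunks:

    >>> c = calcChecksum
    >>> c(b"abcdxyz") == (c(b"abcd") + c(b"xyz")) & 0xFFFFFFFF
    True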
    >>> print(calcChecksum(b"abcd"))
    1633837924
    >>> print(calcChecksum(b"abcdxyz"))
    3655064932
    """
    remainder = len(data) % 4
    if remainder:
        data += b"\0" * (4 - remainder)
    value = 0
    blockSize = 4096
    assert blockSize % 4 == 0
    for i in range(0, len(data), blockSize):
        block = data[i : i + blockSize]
        longs = struct.unpack(">%dL" % (len(block) // 4), block)
        value = (value + sum(longs)) & 0xFFFFFFFF
    return value


def readTTCHeader(file):
    file.seek(0)
    data = file.read(ttcHeaderSize)
    if len(data) != ttcHeaderSize:
        raise TTLibError("Not a Font Collection (not enough data)")
    self = SimpleNamespace()
    sstruct.unpack(ttcHeaderFormat, data, self)
    if self.TTCTag != "ttcf":
        raise TTLibError("Not a Font Collection")
    assert self.Version == 0x00010000 or self.Version == 0x00020000, (
        "unrecognized TTC version 0x%08x" % self.Version
    )
    self.offsetTable = struct.unpack(
        ">%dL" % self.numFonts, file.read(self.numFonts * 4)
    )
    if self.Version == 0x00020000:
        pass  # ignoring version 2.0 signatures
    return self


def writeTTCHeader(file, numFonts):
    self = SimpleNamespace()
    self.TTCTag = "ttcf"
    self.Version = 0x00010000
    self.numFonts = numFonts
    file.seek(0)
    file.write(sstruct.pack(ttcHeaderFormat, self))
    offset = file.tell()
    file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts)))
    return offset


if __name__ == "__main__":
    import sys
    import doctest

    sys.exit(doctest.testmod().failed)


# ---- fontTools/ttLib/standardGlyphOrder.py ----

#
# 'post' table formats 1.0 and 2.0 rely on this list of "standard"
# glyphs.
#
# My list is correct according to the Apple documentation for the 'post' table:
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6post.html
# (However, it seems that TTFdump (from MS) and FontLab disagree, at
# least with respect to the last glyph, which they list as 'dslash'
# instead of 'dcroat'.)
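# A format 1.0 'post' table implies exactly this glyph order, so a glyph's id
# is simply its index in the list below; an illustrative sketch (stdGlyphID is
# a hypothetical helper, not fontTools API):
#
#     def stdGlyphID(glyphName):
#         return standardGlyphOrder.index(glyphName)  # e.g. "A" -> 36
#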
# standardGlyphOrder = [ ".notdef", # 0 ".null", # 1 "nonmarkingreturn", # 2 "space", # 3 "exclam", # 4 "quotedbl", # 5 "numbersign", # 6 "dollar", # 7 "percent", # 8 "ampersand", # 9 "quotesingle", # 10 "parenleft", # 11 "parenright", # 12 "asterisk", # 13 "plus", # 14 "comma", # 15 "hyphen", # 16 "period", # 17 "slash", # 18 "zero", # 19 "one", # 20 "two", # 21 "three", # 22 "four", # 23 "five", # 24 "six", # 25 "seven", # 26 "eight", # 27 "nine", # 28 "colon", # 29 "semicolon", # 30 "less", # 31 "equal", # 32 "greater", # 33 "question", # 34 "at", # 35 "A", # 36 "B", # 37 "C", # 38 "D", # 39 "E", # 40 "F", # 41 "G", # 42 "H", # 43 "I", # 44 "J", # 45 "K", # 46 "L", # 47 "M", # 48 "N", # 49 "O", # 50 "P", # 51 "Q", # 52 "R", # 53 "S", # 54 "T", # 55 "U", # 56 "V", # 57 "W", # 58 "X", # 59 "Y", # 60 "Z", # 61 "bracketleft", # 62 "backslash", # 63 "bracketright", # 64 "asciicircum", # 65 "underscore", # 66 "grave", # 67 "a", # 68 "b", # 69 "c", # 70 "d", # 71 "e", # 72 "f", # 73 "g", # 74 "h", # 75 "i", # 76 "j", # 77 "k", # 78 "l", # 79 "m", # 80 "n", # 81 "o", # 82 "p", # 83 "q", # 84 "r", # 85 "s", # 86 "t", # 87 "u", # 88 "v", # 89 "w", # 90 "x", # 91 "y", # 92 "z", # 93 "braceleft", # 94 "bar", # 95 "braceright", # 96 "asciitilde", # 97 "Adieresis", # 98 "Aring", # 99 "Ccedilla", # 100 "Eacute", # 101 "Ntilde", # 102 "Odieresis", # 103 "Udieresis", # 104 "aacute", # 105 "agrave", # 106 "acircumflex", # 107 "adieresis", # 108 "atilde", # 109 "aring", # 110 "ccedilla", # 111 "eacute", # 112 "egrave", # 113 "ecircumflex", # 114 "edieresis", # 115 "iacute", # 116 "igrave", # 117 "icircumflex", # 118 "idieresis", # 119 "ntilde", # 120 "oacute", # 121 "ograve", # 122 "ocircumflex", # 123 "odieresis", # 124 "otilde", # 125 "uacute", # 126 "ugrave", # 127 "ucircumflex", # 128 "udieresis", # 129 "dagger", # 130 "degree", # 131 "cent", # 132 "sterling", # 133 "section", # 134 "bullet", # 135 "paragraph", # 136 "germandbls", # 137 "registered", # 138 "copyright", # 139 "trademark", # 140 "acute", # 141 "dieresis", # 142 "notequal", # 143 "AE", # 144 "Oslash", # 145 "infinity", # 146 "plusminus", # 147 "lessequal", # 148 "greaterequal", # 149 "yen", # 150 "mu", # 151 "partialdiff", # 152 "summation", # 153 "product", # 154 "pi", # 155 "integral", # 156 "ordfeminine", # 157 "ordmasculine", # 158 "Omega", # 159 "ae", # 160 "oslash", # 161 "questiondown", # 162 "exclamdown", # 163 "logicalnot", # 164 "radical", # 165 "florin", # 166 "approxequal", # 167 "Delta", # 168 "guillemotleft", # 169 "guillemotright", # 170 "ellipsis", # 171 "nonbreakingspace", # 172 "Agrave", # 173 "Atilde", # 174 "Otilde", # 175 "OE", # 176 "oe", # 177 "endash", # 178 "emdash", # 179 "quotedblleft", # 180 "quotedblright", # 181 "quoteleft", # 182 "quoteright", # 183 "divide", # 184 "lozenge", # 185 "ydieresis", # 186 "Ydieresis", # 187 "fraction", # 188 "currency", # 189 "guilsinglleft", # 190 "guilsinglright", # 191 "fi", # 192 "fl", # 193 "daggerdbl", # 194 "periodcentered", # 195 "quotesinglbase", # 196 "quotedblbase", # 197 "perthousand", # 198 "Acircumflex", # 199 "Ecircumflex", # 200 "Aacute", # 201 "Edieresis", # 202 "Egrave", # 203 "Iacute", # 204 "Icircumflex", # 205 "Idieresis", # 206 "Igrave", # 207 "Oacute", # 208 "Ocircumflex", # 209 "apple", # 210 "Ograve", # 211 "Uacute", # 212 "Ucircumflex", # 213 "Ugrave", # 214 "dotlessi", # 215 "circumflex", # 216 "tilde", # 217 "macron", # 218 "breve", # 219 "dotaccent", # 220 "ring", # 221 "cedilla", # 222 "hungarumlaut", # 223 "ogonek", # 224 "caron", # 225 "Lslash", # 
226 "lslash", # 227 "Scaron", # 228 "scaron", # 229 "Zcaron", # 230 "zcaron", # 231 "brokenbar", # 232 "Eth", # 233 "eth", # 234 "Yacute", # 235 "yacute", # 236 "Thorn", # 237 "thorn", # 238 "minus", # 239 "multiply", # 240 "onesuperior", # 241 "twosuperior", # 242 "threesuperior", # 243 "onehalf", # 244 "onequarter", # 245 "threequarters", # 246 "franc", # 247 "Gbreve", # 248 "gbreve", # 249 "Idotaccent", # 250 "Scedilla", # 251 "scedilla", # 252 "Cacute", # 253 "cacute", # 254 "Ccaron", # 255 "ccaron", # 256 "dcroat", # 257 ] PKaZZZ��5<{{fontTools/ttLib/ttCollection.pyfrom fontTools.ttLib.ttFont import TTFont from fontTools.ttLib.sfnt import readTTCHeader, writeTTCHeader from io import BytesIO import struct import logging log = logging.getLogger(__name__) class TTCollection(object): """Object representing a TrueType Collection / OpenType Collection. The main API is self.fonts being a list of TTFont instances. If shareTables is True, then different fonts in the collection might point to the same table object if the data for the table was the same in the font file. Note, however, that this might result in suprises and incorrect behavior if the different fonts involved have different GlyphOrder. Use only if you know what you are doing. """ def __init__(self, file=None, shareTables=False, **kwargs): fonts = self.fonts = [] if file is None: return assert "fontNumber" not in kwargs, kwargs closeStream = False if not hasattr(file, "read"): file = open(file, "rb") closeStream = True tableCache = {} if shareTables else None header = readTTCHeader(file) for i in range(header.numFonts): font = TTFont(file, fontNumber=i, _tableCache=tableCache, **kwargs) fonts.append(font) # don't close file if lazy=True, as the TTFont hold a reference to the original # file; the file will be closed once the TTFonts are closed in the # TTCollection.close(). We still want to close the file if lazy is None or # False, because in that case the TTFont no longer need the original file # and we want to avoid 'ResourceWarning: unclosed file'. if not kwargs.get("lazy") and closeStream: file.close() def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def close(self): for font in self.fonts: font.close() def save(self, file, shareTables=True): """Save the font to disk. Similarly to the constructor, the 'file' argument can be either a pathname or a writable file object. 
""" if not hasattr(file, "write"): final = None file = open(file, "wb") else: # assume "file" is a writable file object # write to a temporary stream to allow saving to unseekable streams final = file file = BytesIO() tableCache = {} if shareTables else None offsets_offset = writeTTCHeader(file, len(self.fonts)) offsets = [] for font in self.fonts: offsets.append(file.tell()) font._save(file, tableCache=tableCache) file.seek(0, 2) file.seek(offsets_offset) file.write(struct.pack(">%dL" % len(self.fonts), *offsets)) if final: final.write(file.getvalue()) file.close() def saveXML(self, fileOrPath, newlinestr="\n", writeVersion=True, **kwargs): from fontTools.misc import xmlWriter writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr) if writeVersion: from fontTools import version version = ".".join(version.split(".")[:2]) writer.begintag("ttCollection", ttLibVersion=version) else: writer.begintag("ttCollection") writer.newline() writer.newline() for font in self.fonts: font._saveXML(writer, writeVersion=False, **kwargs) writer.newline() writer.endtag("ttCollection") writer.newline() writer.close() def __getitem__(self, item): return self.fonts[item] def __setitem__(self, item, value): self.fonts[item] = value def __delitem__(self, item): return self.fonts[item] def __len__(self): return len(self.fonts) def __iter__(self): return iter(self.fonts) PKaZZZH�U�����fontTools/ttLib/ttFont.pyfrom fontTools.config import Config from fontTools.misc import xmlWriter from fontTools.misc.configTools import AbstractConfig from fontTools.misc.textTools import Tag, byteord, tostr from fontTools.misc.loggingTools import deprecateArgument from fontTools.ttLib import TTLibError from fontTools.ttLib.ttGlyphSet import _TTGlyph, _TTGlyphSetCFF, _TTGlyphSetGlyf from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter from io import BytesIO, StringIO, UnsupportedOperation import os import logging import traceback log = logging.getLogger(__name__) class TTFont(object): """Represents a TrueType font. The object manages file input and output, and offers a convenient way of accessing tables. Tables will be only decompiled when necessary, ie. when they're actually accessed. This means that simple operations can be extremely fast. Example usage:: >> from fontTools import ttLib >> tt = ttLib.TTFont("afont.ttf") # Load an existing font file >> tt['maxp'].numGlyphs 242 >> tt['OS/2'].achVendID 'B&H\000' >> tt['head'].unitsPerEm 2048 For details of the objects returned when accessing each table, see :ref:`tables`. To add a table to the font, use the :py:func:`newTable` function:: >> os2 = newTable("OS/2") >> os2.version = 4 >> # set other attributes >> font["OS/2"] = os2 TrueType fonts can also be serialized to and from XML format (see also the :ref:`ttx` binary):: >> tt.saveXML("afont.ttx") Dumping 'LTSH' table... Dumping 'OS/2' table... [...] >> tt2 = ttLib.TTFont() # Create a new font object >> tt2.importXML("afont.ttx") >> tt2['maxp'].numGlyphs 242 The TTFont object may be used as a context manager; this will cause the file reader to be closed after the context ``with`` block is exited:: with TTFont(filename) as f: # Do stuff Args: file: When reading a font from disk, either a pathname pointing to a file, or a readable file object. res_name_or_index: If running on a Macintosh, either a sfnt resource name or an sfnt resource index number. If the index number is zero, TTLib will autodetect whether the file is a flat file or a suitcase. (If it is a suitcase, only the first 'sfnt' resource will be read.) 
sfntVersion (str): When constructing a font object from scratch, sets the four-byte sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create an OpenType file, use ``OTTO``. flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2 file. checkChecksums (int): How checksum data should be treated. Default is 0 (no checking). Set to 1 to check and warn on wrong checksums; set to 2 to raise an exception if any wrong checksums are found. recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``, ``head`` bounding box values and ``hhea``/``vhea`` min/max values on save. Also compiles the glyphs on importing, which saves memory consumption and time. ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation will be ignored, and the binary data will be returned for those tables instead. recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in the ``head`` table on save. fontNumber (int): The index of the font in a TrueType Collection file. lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon access only. If it is set to False, many data structures are loaded immediately. The default is ``lazy=None`` which is somewhere in between. """ def __init__( self, file=None, res_name_or_index=None, sfntVersion="\000\001\000\000", flavor=None, checkChecksums=0, verbose=None, recalcBBoxes=True, allowVID=NotImplemented, ignoreDecompileErrors=False, recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None, _tableCache=None, cfg={}, ): for name in ("verbose", "quiet"): val = locals().get(name) if val is not None: deprecateArgument(name, "configure logging instead") setattr(self, name, val) self.lazy = lazy self.recalcBBoxes = recalcBBoxes self.recalcTimestamp = recalcTimestamp self.tables = {} self.reader = None self.cfg = cfg.copy() if isinstance(cfg, AbstractConfig) else Config(cfg) self.ignoreDecompileErrors = ignoreDecompileErrors if not file: self.sfntVersion = sfntVersion self.flavor = flavor self.flavorData = None return seekable = True if not hasattr(file, "read"): closeStream = True # assume file is a string if res_name_or_index is not None: # see if it contains 'sfnt' resources in the resource or data fork from . import macUtils if res_name_or_index == 0: if macUtils.getSFNTResIndices(file): # get the first available sfnt font. file = macUtils.SFNTResourceReader(file, 1) else: file = open(file, "rb") else: file = macUtils.SFNTResourceReader(file, res_name_or_index) else: file = open(file, "rb") else: # assume "file" is a readable file object closeStream = False # SFNTReader wants the input file to be seekable. 
# SpooledTemporaryFile has no seekable() on < 3.11, but still can seek: # https://github.com/fonttools/fonttools/issues/3052 if hasattr(file, "seekable"): seekable = file.seekable() elif hasattr(file, "seek"): try: file.seek(0) except UnsupportedOperation: seekable = False if not self.lazy: # read input file in memory and wrap a stream around it to allow overwriting if seekable: file.seek(0) tmp = BytesIO(file.read()) if hasattr(file, "name"): # save reference to input file name tmp.name = file.name if closeStream: file.close() file = tmp elif not seekable: raise TTLibError("Input file must be seekable when lazy=True") self._tableCache = _tableCache self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber) self.sfntVersion = self.reader.sfntVersion self.flavor = self.reader.flavor self.flavorData = self.reader.flavorData def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def close(self): """If we still have a reader object, close it.""" if self.reader is not None: self.reader.close() def save(self, file, reorderTables=True): """Save the font to disk. Args: file: Similarly to the constructor, can be either a pathname or a writable file object. reorderTables (Optional[bool]): If true (the default), reorder the tables, sorting them by tag (recommended by the OpenType specification). If false, retain the original font order. If None, reorder by table dependency (fastest). """ if not hasattr(file, "write"): if self.lazy and self.reader.file.name == file: raise TTLibError("Can't overwrite TTFont when 'lazy' attribute is True") createStream = True else: # assume "file" is a writable file object createStream = False tmp = BytesIO() writer_reordersTables = self._save(tmp) if not ( reorderTables is None or writer_reordersTables or (reorderTables is False and self.reader is None) ): if reorderTables is False: # sort tables using the original font's order tableOrder = list(self.reader.keys()) else: # use the recommended order from the OpenType specification tableOrder = None tmp.flush() tmp2 = BytesIO() reorderFontTables(tmp, tmp2, tableOrder) tmp.close() tmp = tmp2 if createStream: # "file" is a path with open(file, "wb") as file: file.write(tmp.getvalue()) else: file.write(tmp.getvalue()) tmp.close() def _save(self, file, tableCache=None): """Internal function, to be shared by save() and TTCollection.save()""" if self.recalcTimestamp and "head" in self: self[ "head" ] # make sure 'head' is loaded so the recalculation is actually done tags = list(self.keys()) if "GlyphOrder" in tags: tags.remove("GlyphOrder") numTables = len(tags) # write to a temporary stream to allow saving to unseekable streams writer = SFNTWriter( file, numTables, self.sfntVersion, self.flavor, self.flavorData ) done = [] for tag in tags: self._writeTable(tag, writer, done, tableCache) writer.close() return writer.reordersTables() def saveXML(self, fileOrPath, newlinestr="\n", **kwargs): """Export the font as TTX (an XML-based text file), or as a series of text files when splitTables is true. In the latter case, the 'fileOrPath' argument should be a path to a directory. The 'tables' argument must either be false (dump all tables) or a list of tables to dump. The 'skipTables' argument may be a list of tables to skip, but only when the 'tables' argument is false.
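        A usage sketch (editor's addition; paths are hypothetical)::

            >> font.saveXML("afont.ttx")  # dump all tables into a single TTX file
            >> font.saveXML("afont.ttx", tables=["name", "cmap"])  # only these tables
            >> font.saveXML("afont.ttx", skipTables=["glyf"])  # everything but glyf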
""" writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr) self._saveXML(writer, **kwargs) writer.close() def _saveXML( self, writer, writeVersion=True, quiet=None, tables=None, skipTables=None, splitTables=False, splitGlyphs=False, disassembleInstructions=True, bitmapGlyphDataFormat="raw", ): if quiet is not None: deprecateArgument("quiet", "configure logging instead") self.disassembleInstructions = disassembleInstructions self.bitmapGlyphDataFormat = bitmapGlyphDataFormat if not tables: tables = list(self.keys()) if "GlyphOrder" not in tables: tables = ["GlyphOrder"] + tables if skipTables: for tag in skipTables: if tag in tables: tables.remove(tag) numTables = len(tables) if writeVersion: from fontTools import version version = ".".join(version.split(".")[:2]) writer.begintag( "ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1], ttLibVersion=version, ) else: writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1]) writer.newline() # always splitTables if splitGlyphs is enabled splitTables = splitTables or splitGlyphs if not splitTables: writer.newline() else: path, ext = os.path.splitext(writer.filename) for i in range(numTables): tag = tables[i] if splitTables: tablePath = path + "." + tagToIdentifier(tag) + ext tableWriter = xmlWriter.XMLWriter( tablePath, newlinestr=writer.newlinestr ) tableWriter.begintag("ttFont", ttLibVersion=version) tableWriter.newline() tableWriter.newline() writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath)) writer.newline() else: tableWriter = writer self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs) if splitTables: tableWriter.endtag("ttFont") tableWriter.newline() tableWriter.close() writer.endtag("ttFont") writer.newline() def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False): if quiet is not None: deprecateArgument("quiet", "configure logging instead") if tag in self: table = self[tag] report = "Dumping '%s' table..." % tag else: report = "No '%s' table found." % tag log.info(report) if tag not in self: return xmlTag = tagToXML(tag) attrs = dict() if hasattr(table, "ERROR"): attrs["ERROR"] = "decompilation error" from .tables.DefaultTable import DefaultTable if table.__class__ == DefaultTable: attrs["raw"] = True writer.begintag(xmlTag, **attrs) writer.newline() if tag == "glyf": table.toXML(writer, self, splitGlyphs=splitGlyphs) else: table.toXML(writer, self) writer.endtag(xmlTag) writer.newline() writer.newline() def importXML(self, fileOrPath, quiet=None): """Import a TTX file (an XML-based text format), so as to recreate a font object. """ if quiet is not None: deprecateArgument("quiet", "configure logging instead") if "maxp" in self and "post" in self: # Make sure the glyph order is loaded, as it otherwise gets # lost if the XML doesn't contain the glyph order, yet does # contain the table which was originally used to extract the # glyph names from (ie. 'post', 'cmap' or 'CFF '). self.getGlyphOrder() from fontTools.misc import xmlReader reader = xmlReader.XMLReader(fileOrPath, self) reader.read() def isLoaded(self, tag): """Return true if the table identified by ``tag`` has been decompiled and loaded into memory.""" return tag in self.tables def has_key(self, tag): """Test if the table identified by ``tag`` is present in the font. 
As well as this method, ``tag in font`` can also be used to determine the presence of the table.""" if self.isLoaded(tag): return True elif self.reader and tag in self.reader: return True elif tag == "GlyphOrder": return True else: return False __contains__ = has_key def keys(self): """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table.""" keys = list(self.tables.keys()) if self.reader: for key in list(self.reader.keys()): if key not in keys: keys.append(key) if "GlyphOrder" in keys: keys.remove("GlyphOrder") keys = sortedTagList(keys) return ["GlyphOrder"] + keys def ensureDecompiled(self, recurse=None): """Decompile all the tables, even if a TTFont was opened in 'lazy' mode.""" for tag in self.keys(): table = self[tag] if recurse is None: recurse = self.lazy is not False if recurse and hasattr(table, "ensureDecompiled"): table.ensureDecompiled(recurse=recurse) self.lazy = False def __len__(self): return len(list(self.keys())) def __getitem__(self, tag): tag = Tag(tag) table = self.tables.get(tag) if table is None: if tag == "GlyphOrder": table = GlyphOrder(tag) self.tables[tag] = table elif self.reader is not None: table = self._readTable(tag) else: raise KeyError("'%s' table not found" % tag) return table def _readTable(self, tag): log.debug("Reading '%s' table from disk", tag) data = self.reader[tag] if self._tableCache is not None: table = self._tableCache.get((tag, data)) if table is not None: return table tableClass = getTableClass(tag) table = tableClass(tag) self.tables[tag] = table log.debug("Decompiling '%s' table", tag) try: table.decompile(data, self) except Exception: if not self.ignoreDecompileErrors: raise # fall back to DefaultTable, retaining the binary table data log.exception( "An exception occurred during the decompilation of the '%s' table", tag ) from .tables.DefaultTable import DefaultTable file = StringIO() traceback.print_exc(file=file) table = DefaultTable(tag) table.ERROR = file.getvalue() self.tables[tag] = table table.decompile(data, self) if self._tableCache is not None: self._tableCache[(tag, data)] = table return table def __setitem__(self, tag, table): self.tables[Tag(tag)] = table def __delitem__(self, tag): if tag not in self: raise KeyError("'%s' table not found" % tag) if tag in self.tables: del self.tables[tag] if self.reader and tag in self.reader: del self.reader[tag] def get(self, tag, default=None): """Returns the table if it exists or (optionally) a default if it doesn't.""" try: return self[tag] except KeyError: return default def setGlyphOrder(self, glyphOrder): """Set the glyph order Args: glyphOrder ([str]): List of glyph names in order. """ self.glyphOrder = glyphOrder if hasattr(self, "_reverseGlyphOrderDict"): del self._reverseGlyphOrderDict if self.isLoaded("glyf"): self["glyf"].setGlyphOrder(glyphOrder) def getGlyphOrder(self): """Returns a list of glyph names ordered by their position in the font.""" try: return self.glyphOrder except AttributeError: pass if "CFF " in self: cff = self["CFF "] self.glyphOrder = cff.getGlyphOrder() elif "post" in self: # TrueType font glyphOrder = self["post"].getGlyphOrder() if glyphOrder is None: # # No names found in the 'post' table. # Try to create glyph names from the unicode cmap (if available) # in combination with the Adobe Glyph List (AGL). # self._getGlyphNamesFromCmap() elif len(glyphOrder) < self["maxp"].numGlyphs: # # Not enough names found in the 'post' table. 
# Can happen when 'post' format 1 is improperly used on a font that # has more than 258 glyphs (the length of 'standardGlyphOrder'). # log.warning( "Not enough names found in the 'post' table, generating them from cmap instead" ) self._getGlyphNamesFromCmap() else: self.glyphOrder = glyphOrder else: self._getGlyphNamesFromCmap() return self.glyphOrder def _getGlyphNamesFromCmap(self): # # This is rather convoluted, but then again, it's an interesting problem: # - we need to use the unicode values found in the cmap table to # build glyph names (eg. because there is only a minimal post table, # or none at all). # - but the cmap parser also needs glyph names to work with... # So here's what we do: # - make up glyph names based on glyphID # - load a temporary cmap table based on those names # - extract the unicode values, build the "real" glyph names # - unload the temporary cmap table # if self.isLoaded("cmap"): # Bootstrapping: we're getting called by the cmap parser # itself. This means self.tables['cmap'] contains a partially # loaded cmap, making it impossible to get at a unicode # subtable here. We remove the partially loaded cmap and # restore it later. # This only happens if the cmap table is loaded before any # other table that does f.getGlyphOrder() or f.getGlyphName(). cmapLoading = self.tables["cmap"] del self.tables["cmap"] else: cmapLoading = None # Make up glyph names based on glyphID, which will be used by the # temporary cmap and by the real cmap in case we don't find a unicode # cmap. numGlyphs = int(self["maxp"].numGlyphs) glyphOrder = [None] * numGlyphs glyphOrder[0] = ".notdef" for i in range(1, numGlyphs): glyphOrder[i] = "glyph%.5d" % i # Set the glyph order, so the cmap parser has something # to work with (so we don't get called recursively). self.glyphOrder = glyphOrder # Make up glyph names based on the reversed cmap table. Because some # glyphs (eg. ligatures or alternates) may not be reachable via cmap, # this naming table will usually not cover all glyphs in the font. # If the font has no Unicode cmap table, reversecmap will be empty. if "cmap" in self: reversecmap = self["cmap"].buildReversed() else: reversecmap = {} useCount = {} for i in range(numGlyphs): tempName = glyphOrder[i] if tempName in reversecmap: # If a font maps both U+0041 LATIN CAPITAL LETTER A and # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph, # we prefer naming the glyph as "A". glyphName = self._makeGlyphName(min(reversecmap[tempName])) numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1 if numUses > 1: glyphName = "%s.alt%d" % (glyphName, numUses - 1) glyphOrder[i] = glyphName if "cmap" in self: # Delete the temporary cmap table from the cache, so it can # be parsed again with the right names. del self.tables["cmap"] self.glyphOrder = glyphOrder if cmapLoading: # restore partially loaded cmap, so it can continue loading # using the proper names. self.tables["cmap"] = cmapLoading @staticmethod def _makeGlyphName(codepoint): from fontTools import agl # Adobe Glyph List if codepoint in agl.UV2AGL: return agl.UV2AGL[codepoint] elif codepoint <= 0xFFFF: return "uni%04X" % codepoint else: return "u%X" % codepoint def getGlyphNames(self): """Get a list of glyph names, sorted alphabetically.""" glyphNames = sorted(self.getGlyphOrder()) return glyphNames def getGlyphNames2(self): """Get a list of glyph names, sorted alphabetically, ignoring case.
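        For example (editor's sketch; the output shown is illustrative only)::

            >> font.getGlyphNames2()[:4]
            ['.notdef', 'A', 'a', 'acute']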
""" from fontTools.misc import textTools return textTools.caselessSort(self.getGlyphOrder()) def getGlyphName(self, glyphID): """Returns the name for the glyph with the given ID. If no name is available, synthesises one with the form ``glyphXXXXX``` where ```XXXXX`` is the zero-padded glyph ID. """ try: return self.getGlyphOrder()[glyphID] except IndexError: return "glyph%.5d" % glyphID def getGlyphNameMany(self, lst): """Converts a list of glyph IDs into a list of glyph names.""" glyphOrder = self.getGlyphOrder() cnt = len(glyphOrder) return [glyphOrder[gid] if gid < cnt else "glyph%.5d" % gid for gid in lst] def getGlyphID(self, glyphName): """Returns the ID of the glyph with the given name.""" try: return self.getReverseGlyphMap()[glyphName] except KeyError: if glyphName[:5] == "glyph": try: return int(glyphName[5:]) except (NameError, ValueError): raise KeyError(glyphName) raise def getGlyphIDMany(self, lst): """Converts a list of glyph names into a list of glyph IDs.""" d = self.getReverseGlyphMap() try: return [d[glyphName] for glyphName in lst] except KeyError: getGlyphID = self.getGlyphID return [getGlyphID(glyphName) for glyphName in lst] def getReverseGlyphMap(self, rebuild=False): """Returns a mapping of glyph names to glyph IDs.""" if rebuild or not hasattr(self, "_reverseGlyphOrderDict"): self._buildReverseGlyphOrderDict() return self._reverseGlyphOrderDict def _buildReverseGlyphOrderDict(self): self._reverseGlyphOrderDict = d = {} for glyphID, glyphName in enumerate(self.getGlyphOrder()): d[glyphName] = glyphID return d def _writeTable(self, tag, writer, done, tableCache=None): """Internal helper function for self.save(). Keeps track of inter-table dependencies. """ if tag in done: return tableClass = getTableClass(tag) for masterTable in tableClass.dependencies: if masterTable not in done: if masterTable in self: self._writeTable(masterTable, writer, done, tableCache) else: done.append(masterTable) done.append(tag) tabledata = self.getTableData(tag) if tableCache is not None: entry = tableCache.get((Tag(tag), tabledata)) if entry is not None: log.debug("reusing '%s' table", tag) writer.setEntry(tag, entry) return log.debug("Writing '%s' table to disk", tag) writer[tag] = tabledata if tableCache is not None: tableCache[(Tag(tag), tabledata)] = writer[tag] def getTableData(self, tag): """Returns the binary representation of a table. If the table is currently loaded and in memory, the data is compiled to binary and returned; if it is not currently loaded, the binary data is read from the font file and returned. """ tag = Tag(tag) if self.isLoaded(tag): log.debug("Compiling '%s' table", tag) return self.tables[tag].compile(self) elif self.reader and tag in self.reader: log.debug("Reading '%s' table from disk", tag) return self.reader[tag] else: raise KeyError(tag) def getGlyphSet( self, preferCFF=True, location=None, normalized=False, recalcBounds=True ): """Return a generic GlyphSet, which is a dict-like object mapping glyph names to glyph objects. The returned glyph objects have a ``.draw()`` method that supports the Pen protocol, and will have an attribute named 'width'. If the font is CFF-based, the outlines will be taken from the ``CFF `` or ``CFF2`` tables. Otherwise the outlines will be taken from the ``glyf`` table. If the font contains both a ``CFF ``/``CFF2`` and a ``glyf`` table, you can use the ``preferCFF`` argument to specify which one should be taken. If the font contains both a ``CFF `` and a ``CFF2`` table, the latter is taken. 
If the ``location`` parameter is set, it should be a dictionary mapping four-letter variation tags to their float values, and the returned glyph-set will represent an instance of a variable font at that location. If the ``normalized`` variable is set to True, that location is interpreted as in the normalized (-1..+1) space, otherwise it is in the font's defined axes space. """ if location and "fvar" not in self: location = None if location and not normalized: location = self.normalizeLocation(location) if ("CFF " in self or "CFF2" in self) and (preferCFF or "glyf" not in self): return _TTGlyphSetCFF(self, location) elif "glyf" in self: return _TTGlyphSetGlyf(self, location, recalcBounds=recalcBounds) else: raise TTLibError("Font contains no outlines") def normalizeLocation(self, location): """Normalize a ``location`` from the font's defined axes space (also known as user space) into the normalized (-1..+1) space. It applies ``avar`` mapping if the font contains an ``avar`` table. The ``location`` parameter should be a dictionary mapping four-letter variation tags to their float values. Raises ``TTLibError`` if the font is not a variable font. """ from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap if "fvar" not in self: raise TTLibError("Not a variable font") axes = { a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in self["fvar"].axes } location = normalizeLocation(location, axes) if "avar" in self: avar = self["avar"] avarSegments = avar.segments mappedLocation = {} for axisTag, value in location.items(): avarMapping = avarSegments.get(axisTag, None) if avarMapping is not None: value = piecewiseLinearMap(value, avarMapping) mappedLocation[axisTag] = value location = mappedLocation return location def getBestCmap( self, cmapPreferences=( (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0), ), ): """Returns the 'best' Unicode cmap dictionary available in the font or ``None``, if no Unicode cmap subtable is available. By default it will search for the following (platformID, platEncID) pairs in order:: (3, 10), # Windows Unicode full repertoire (0, 6), # Unicode full repertoire (format 13 subtable) (0, 4), # Unicode 2.0 full repertoire (3, 1), # Windows Unicode BMP (0, 3), # Unicode 2.0 BMP (0, 2), # Unicode ISO/IEC 10646 (0, 1), # Unicode 1.1 (0, 0) # Unicode 1.0 This particular order matches what HarfBuzz uses to choose what subtable to use by default. This order prefers the largest-repertoire subtable, and among those, prefers the Windows-platform over the Unicode-platform as the former has wider support. This order can be customized via the ``cmapPreferences`` argument. """ return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences) def reorderGlyphs(self, new_glyph_order): from .reorderGlyphs import reorderGlyphs reorderGlyphs(self, new_glyph_order) class GlyphOrder(object): """A pseudo table. The glyph order isn't in the font as a separate table, but it's nice to present it as such in the TTX format. """ def __init__(self, tag=None): pass def toXML(self, writer, ttFont): glyphOrder = ttFont.getGlyphOrder() writer.comment( "The 'id' attribute is only for humans; " "it is ignored when parsed." 
) writer.newline() for i in range(len(glyphOrder)): glyphName = glyphOrder[i] writer.simpletag("GlyphID", id=i, name=glyphName) writer.newline() def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "glyphOrder"): self.glyphOrder = [] if name == "GlyphID": self.glyphOrder.append(attrs["name"]) ttFont.setGlyphOrder(self.glyphOrder) def getTableModule(tag): """Fetch the packer/unpacker module for a table. Return None when no module is found. """ from . import tables pyTag = tagToIdentifier(tag) try: __import__("fontTools.ttLib.tables." + pyTag) except ImportError as err: # If pyTag is found in the ImportError message, # means table is not implemented. If it's not # there, then some other module is missing, don't # suppress the error. if str(err).find(pyTag) >= 0: return None else: raise err else: return getattr(tables, pyTag) # Registry for custom table packer/unpacker classes. Keys are table # tags, values are (moduleName, className) tuples. # See registerCustomTableClass() and getCustomTableClass() _customTableRegistry = {} def registerCustomTableClass(tag, moduleName, className=None): """Register a custom packer/unpacker class for a table. The 'moduleName' must be an importable module. If no 'className' is given, it is derived from the tag, for example it will be ``table_C_U_S_T_`` for a 'CUST' tag. The registered table class should be a subclass of :py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable` """ if className is None: className = "table_" + tagToIdentifier(tag) _customTableRegistry[tag] = (moduleName, className) def unregisterCustomTableClass(tag): """Unregister the custom packer/unpacker class for a table.""" del _customTableRegistry[tag] def getCustomTableClass(tag): """Return the custom table class for tag, if one has been registered with 'registerCustomTableClass()'. Else return None. """ if tag not in _customTableRegistry: return None import importlib moduleName, className = _customTableRegistry[tag] module = importlib.import_module(moduleName) return getattr(module, className) def getTableClass(tag): """Fetch the packer/unpacker class for a table.""" tableClass = getCustomTableClass(tag) if tableClass is not None: return tableClass module = getTableModule(tag) if module is None: from .tables.DefaultTable import DefaultTable return DefaultTable pyTag = tagToIdentifier(tag) tableClass = getattr(module, "table_" + pyTag) return tableClass def getClassTag(klass): """Fetch the table tag for a class object.""" name = klass.__name__ assert name[:6] == "table_" name = name[6:] # Chop 'table_' return identifierToTag(name) def newTable(tag): """Return a new instance of a table.""" tableClass = getTableClass(tag) return tableClass(tag) def _escapechar(c): """Helper function for tagToIdentifier()""" import re if re.match("[a-z0-9]", c): return "_" + c elif re.match("[A-Z]", c): return c + "_" else: return hex(byteord(c))[2:] def tagToIdentifier(tag): """Convert a table tag to a valid (but UGLY) python identifier, as well as a filename that's guaranteed to be unique even on a caseless file system. Each character is mapped to two characters. Lowercase letters get an underscore before the letter, uppercase letters get an underscore after the letter. Trailing spaces are trimmed. Illegal characters are escaped as two hex bytes. If the result starts with a number (as the result of a hex escape), an extra underscore is prepended. 
Examples:: >>> tagToIdentifier('glyf') '_g_l_y_f' >>> tagToIdentifier('cvt ') '_c_v_t' >>> tagToIdentifier('OS/2') 'O_S_2f_2' """ import re tag = Tag(tag) if tag == "GlyphOrder": return tag assert len(tag) == 4, "tag should be 4 characters long" while len(tag) > 1 and tag[-1] == " ": tag = tag[:-1] ident = "" for c in tag: ident = ident + _escapechar(c) if re.match("[0-9]", ident): ident = "_" + ident return ident def identifierToTag(ident): """the opposite of tagToIdentifier()""" if ident == "GlyphOrder": return ident if len(ident) % 2 and ident[0] == "_": ident = ident[1:] assert not (len(ident) % 2) tag = "" for i in range(0, len(ident), 2): if ident[i] == "_": tag = tag + ident[i + 1] elif ident[i + 1] == "_": tag = tag + ident[i] else: # assume hex tag = tag + chr(int(ident[i : i + 2], 16)) # append trailing spaces tag = tag + (4 - len(tag)) * " " return Tag(tag) def tagToXML(tag): """Similarly to tagToIdentifier(), this converts a TT tag to a valid XML element name. Since XML element names are case sensitive, this is a fairly simple/readable translation. """ import re tag = Tag(tag) if tag == "OS/2": return "OS_2" elif tag == "GlyphOrder": return tag if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag): return tag.strip() else: return tagToIdentifier(tag) def xmlToTag(tag): """The opposite of tagToXML()""" if tag == "OS_2": return Tag("OS/2") if len(tag) == 8: return identifierToTag(tag) else: return Tag(tag + " " * (4 - len(tag))) # Table order as recommended in the OpenType specification 1.4 TTFTableOrder = [ "head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX", "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf", "kern", "name", "post", "gasp", "PCLT", ] OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", "CFF "] def sortedTagList(tagList, tableOrder=None): """Return a sorted copy of tagList, sorted according to the OpenType specification, or according to a custom tableOrder. If given and not None, tableOrder needs to be a list of tag names. """ tagList = sorted(tagList) if tableOrder is None: if "DSIG" in tagList: # DSIG should be last (XXX spec reference?) tagList.remove("DSIG") tagList.append("DSIG") if "CFF " in tagList: tableOrder = OTFTableOrder else: tableOrder = TTFTableOrder orderedTables = [] for tag in tableOrder: if tag in tagList: orderedTables.append(tag) tagList.remove(tag) orderedTables.extend(tagList) return orderedTables def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False): """Rewrite a font file, ordering the tables as recommended by the OpenType specification 1.4. """ inFile.seek(0) outFile.seek(0) reader = SFNTReader(inFile, checkChecksums=checkChecksums) writer = SFNTWriter( outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData, ) tables = list(reader.keys()) for tag in sortedTagList(tables, tableOrder): writer[tag] = reader[tag] writer.close() def maxPowerOfTwo(x): """Return the highest exponent of two, so that (2 ** exponent) <= x. Return 0 if x is 0. """ exponent = 0 while x: x = x >> 1 exponent = exponent + 1 return max(exponent - 1, 0) def getSearchRange(n, itemSize=16): """Calculate searchRange, entrySelector, rangeShift.""" # itemSize defaults to 16, for backward compatibility # with upstream fonttools. 
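# (Editor's note) These are the three binary-search helper fields stored in
# the sfnt table directory: searchRange is the largest power-of-two multiple
# of itemSize not exceeding n * itemSize, entrySelector is log2 of that power
# of two, and rangeShift is the remainder, n * itemSize - searchRange.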
exponent = maxPowerOfTwo(n) searchRange = (2**exponent) * itemSize entrySelector = exponent rangeShift = max(0, n * itemSize - searchRange) return searchRange, entrySelector, rangeShift

fontTools/ttLib/ttGlyphSet.py

"""GlyphSets returned by a TTFont.""" from abc import ABC, abstractmethod from collections.abc import Mapping from contextlib import contextmanager from copy import copy from types import SimpleNamespace from fontTools.misc.fixedTools import otRound from fontTools.misc.loggingTools import deprecateFunction from fontTools.misc.transform import Transform from fontTools.pens.transformPen import TransformPen, TransformPointPen from fontTools.pens.recordingPen import ( DecomposingRecordingPen, lerpRecordings, replayRecording, ) class _TTGlyphSet(Mapping): """Generic dict-like GlyphSet class that pulls metrics from hmtx and glyph shape from TrueType or CFF. """ def __init__(self, font, location, glyphsMapping, *, recalcBounds=True): self.recalcBounds = recalcBounds self.font = font self.defaultLocationNormalized = ( {axis.axisTag: 0 for axis in self.font["fvar"].axes} if "fvar" in self.font else {} ) self.location = location if location is not None else {} self.rawLocation = {} # VarComponent-only location self.originalLocation = location if location is not None else {} self.depth = 0 self.locationStack = [] self.rawLocationStack = [] self.glyphsMapping = glyphsMapping self.hMetrics = font["hmtx"].metrics self.vMetrics = getattr(font.get("vmtx"), "metrics", None) self.hvarTable = None if location: from fontTools.varLib.varStore import VarStoreInstancer self.hvarTable = getattr(font.get("HVAR"), "table", None) if self.hvarTable is not None: self.hvarInstancer = VarStoreInstancer( self.hvarTable.VarStore, font["fvar"].axes, location ) # TODO VVAR, VORG @contextmanager def pushLocation(self, location, reset: bool): self.locationStack.append(self.location) self.rawLocationStack.append(self.rawLocation) if reset: self.location = self.originalLocation.copy() self.rawLocation = self.defaultLocationNormalized.copy() else: self.location = self.location.copy() self.rawLocation = {} self.location.update(location) self.rawLocation.update(location) try: yield None finally: self.location = self.locationStack.pop() self.rawLocation = self.rawLocationStack.pop() @contextmanager def pushDepth(self): try: depth = self.depth self.depth += 1 yield depth finally: self.depth -= 1 def __contains__(self, glyphName): return glyphName in self.glyphsMapping def __iter__(self): return iter(self.glyphsMapping.keys()) def __len__(self): return len(self.glyphsMapping) @deprecateFunction( "use 'glyphName in glyphSet' instead", category=DeprecationWarning ) def has_key(self, glyphName): return glyphName in self.glyphsMapping class _TTGlyphSetGlyf(_TTGlyphSet): def __init__(self, font, location, recalcBounds=True): self.glyfTable = font["glyf"] super().__init__(font, location, self.glyfTable, recalcBounds=recalcBounds) self.gvarTable = font.get("gvar") def __getitem__(self, glyphName): return _TTGlyphGlyf(self, glyphName, recalcBounds=self.recalcBounds) class _TTGlyphSetCFF(_TTGlyphSet): def __init__(self, font, location): tableTag = "CFF2" if "CFF2" in font else "CFF " self.charStrings = list(font[tableTag].cff.values())[0].CharStrings super().__init__(font, location, self.charStrings) self.blender = None if location: from fontTools.varLib.varStore import VarStoreInstancer varStore = getattr(self.charStrings, "varStore", None) if varStore is not None: instancer = VarStoreInstancer( varStore.otVarStore,
font["fvar"].axes, location ) self.blender = instancer.interpolateFromDeltas def __getitem__(self, glyphName): return _TTGlyphCFF(self, glyphName) class _TTGlyph(ABC): """Glyph object that supports the Pen protocol, meaning that it has .draw() and .drawPoints() methods that take a pen object as their only argument. Additionally there are 'width' and 'lsb' attributes, read from the 'hmtx' table. If the font contains a 'vmtx' table, there will also be 'height' and 'tsb' attributes. """ def __init__(self, glyphSet, glyphName, *, recalcBounds=True): self.glyphSet = glyphSet self.name = glyphName self.recalcBounds = recalcBounds self.width, self.lsb = glyphSet.hMetrics[glyphName] if glyphSet.vMetrics is not None: self.height, self.tsb = glyphSet.vMetrics[glyphName] else: self.height, self.tsb = None, None if glyphSet.location and glyphSet.hvarTable is not None: varidx = ( glyphSet.font.getGlyphID(glyphName) if glyphSet.hvarTable.AdvWidthMap is None else glyphSet.hvarTable.AdvWidthMap.mapping[glyphName] ) self.width += glyphSet.hvarInstancer[varidx] # TODO: VVAR/VORG @abstractmethod def draw(self, pen): """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details how that works. """ raise NotImplementedError def drawPoints(self, pen): """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details how that works. """ from fontTools.pens.pointPen import SegmentToPointPen self.draw(SegmentToPointPen(pen)) class _TTGlyphGlyf(_TTGlyph): def draw(self, pen): """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details how that works. """ glyph, offset = self._getGlyphAndOffset() with self.glyphSet.pushDepth() as depth: if depth: offset = 0 # Offset should only apply at top-level if glyph.isVarComposite(): self._drawVarComposite(glyph, pen, False) return glyph.draw(pen, self.glyphSet.glyfTable, offset) def drawPoints(self, pen): """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details how that works. 
""" glyph, offset = self._getGlyphAndOffset() with self.glyphSet.pushDepth() as depth: if depth: offset = 0 # Offset should only apply at top-level if glyph.isVarComposite(): self._drawVarComposite(glyph, pen, True) return glyph.drawPoints(pen, self.glyphSet.glyfTable, offset) def _drawVarComposite(self, glyph, pen, isPointPen): from fontTools.ttLib.tables._g_l_y_f import ( VarComponentFlags, VAR_COMPONENT_TRANSFORM_MAPPING, ) for comp in glyph.components: with self.glyphSet.pushLocation( comp.location, comp.flags & VarComponentFlags.RESET_UNSPECIFIED_AXES ): try: pen.addVarComponent( comp.glyphName, comp.transform, self.glyphSet.rawLocation ) except AttributeError: t = comp.transform.toTransform() if isPointPen: tPen = TransformPointPen(pen, t) self.glyphSet[comp.glyphName].drawPoints(tPen) else: tPen = TransformPen(pen, t) self.glyphSet[comp.glyphName].draw(tPen) def _getGlyphAndOffset(self): if self.glyphSet.location and self.glyphSet.gvarTable is not None: glyph = self._getGlyphInstance() else: glyph = self.glyphSet.glyfTable[self.name] offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0 return glyph, offset def _getGlyphInstance(self): from fontTools.varLib.iup import iup_delta from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates from fontTools.varLib.models import supportScalar glyphSet = self.glyphSet glyfTable = glyphSet.glyfTable variations = glyphSet.gvarTable.variations[self.name] hMetrics = glyphSet.hMetrics vMetrics = glyphSet.vMetrics coordinates, _ = glyfTable._getCoordinatesAndControls( self.name, hMetrics, vMetrics ) origCoords, endPts = None, None for var in variations: scalar = supportScalar(glyphSet.location, var.axes) if not scalar: continue delta = var.coordinates if None in delta: if origCoords is None: origCoords, control = glyfTable._getCoordinatesAndControls( self.name, hMetrics, vMetrics ) endPts = ( control[1] if control[0] >= 1 else list(range(len(control[1]))) ) delta = iup_delta(delta, origCoords, endPts) coordinates += GlyphCoordinates(delta) * scalar glyph = copy(glyfTable[self.name]) # Shallow copy width, lsb, height, tsb = _setCoordinates( glyph, coordinates, glyfTable, recalcBounds=self.recalcBounds ) self.lsb = lsb self.tsb = tsb if glyphSet.hvarTable is None: # no HVAR: let's set metrics from the phantom points self.width = width self.height = height return glyph class _TTGlyphCFF(_TTGlyph): def draw(self, pen): """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details how that works. """ self.glyphSet.charStrings[self.name].draw(pen, self.glyphSet.blender) def _setCoordinates(glyph, coord, glyfTable, *, recalcBounds=True): # Handle phantom points for (left, right, top, bottom) positions. 
assert len(coord) >= 4 leftSideX = coord[-4][0] rightSideX = coord[-3][0] topSideY = coord[-2][1] bottomSideY = coord[-1][1] for _ in range(4): del coord[-1] if glyph.isComposite(): assert len(coord) == len(glyph.components) glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy for p, comp in zip(coord, glyph.components): if hasattr(comp, "x"): comp.x, comp.y = p elif glyph.isVarComposite(): glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy for comp in glyph.components: coord = comp.setCoordinates(coord) assert not coord elif glyph.numberOfContours == 0: assert len(coord) == 0 else: assert len(coord) == len(glyph.coordinates) glyph.coordinates = coord if recalcBounds: glyph.recalcBounds(glyfTable) horizontalAdvanceWidth = otRound(rightSideX - leftSideX) verticalAdvanceWidth = otRound(topSideY - bottomSideY) leftSideBearing = otRound(glyph.xMin - leftSideX) topSideBearing = otRound(topSideY - glyph.yMax) return ( horizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth, topSideBearing, ) class LerpGlyphSet(Mapping): """A glyphset that interpolates between two other glyphsets. Factor is typically between 0 and 1. 0 means the first glyphset, 1 means the second glyphset, and 0.5 means the average of the two glyphsets. Other values are possible, and can be useful to extrapolate. Defaults to 0.5. """ def __init__(self, glyphset1, glyphset2, factor=0.5): self.glyphset1 = glyphset1 self.glyphset2 = glyphset2 self.factor = factor def __getitem__(self, glyphname): if glyphname in self.glyphset1 and glyphname in self.glyphset2: return LerpGlyph(glyphname, self) raise KeyError(glyphname) def __contains__(self, glyphname): return glyphname in self.glyphset1 and glyphname in self.glyphset2 def __iter__(self): set1 = set(self.glyphset1) set2 = set(self.glyphset2) return iter(set1.intersection(set2)) def __len__(self): set1 = set(self.glyphset1) set2 = set(self.glyphset2) return len(set1.intersection(set2)) class LerpGlyph: def __init__(self, glyphname, glyphset): self.glyphset = glyphset self.glyphname = glyphname def draw(self, pen): recording1 = DecomposingRecordingPen(self.glyphset.glyphset1) self.glyphset.glyphset1[self.glyphname].draw(recording1) recording2 = DecomposingRecordingPen(self.glyphset.glyphset2) self.glyphset.glyphset2[self.glyphname].draw(recording2) factor = self.glyphset.factor replayRecording(lerpRecordings(recording1.value, recording2.value, factor), pen)

fontTools/ttLib/ttVisitor.py

"""Specialization of fontTools.misc.visitor to work with TTFont.""" from fontTools.misc.visitor import Visitor from fontTools.ttLib import TTFont class TTVisitor(Visitor): def visitAttr(self, obj, attr, value, *args, **kwargs): if isinstance(value, TTFont): return False super().visitAttr(obj, attr, value, *args, **kwargs) def visit(self, obj, *args, **kwargs): if hasattr(obj, "ensureDecompiled"): obj.ensureDecompiled(recurse=False) super().visit(obj, *args, **kwargs) @TTVisitor.register(TTFont) def visit(visitor, font, *args, **kwargs): # Some objects have links back to TTFont; even though we # have a check in visitAttr to stop them from recursing # onto TTFont, sometimes they still do, for example when # someone overrides visitAttr.
if hasattr(visitor, "font"): return False visitor.font = font for tag in font.keys(): visitor.visit(font[tag], *args, **kwargs) del visitor.font return False PKaZZZ��������fontTools/ttLib/woff2.pyfrom io import BytesIO import sys import array import struct from collections import OrderedDict from fontTools.misc import sstruct from fontTools.misc.arrayTools import calcIntBounds from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad from fontTools.ttLib import ( TTFont, TTLibError, getTableModule, getTableClass, getSearchRange, ) from fontTools.ttLib.sfnt import ( SFNTReader, SFNTWriter, DirectoryEntry, WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry, sfntDirectoryEntrySize, calcChecksum, ) from fontTools.ttLib.tables import ttProgram, _g_l_y_f import logging log = logging.getLogger("fontTools.ttLib.woff2") haveBrotli = False try: try: import brotlicffi as brotli except ImportError: import brotli haveBrotli = True except ImportError: pass class WOFF2Reader(SFNTReader): flavor = "woff2" def __init__(self, file, checkChecksums=0, fontNumber=-1): if not haveBrotli: log.error( "The WOFF2 decoder requires the Brotli Python extension, available at: " "https://github.com/google/brotli" ) raise ImportError("No module named brotli") self.file = file signature = Tag(self.file.read(4)) if signature != b"wOF2": raise TTLibError("Not a WOFF2 font (bad signature)") self.file.seek(0) self.DirectoryEntry = WOFF2DirectoryEntry data = self.file.read(woff2DirectorySize) if len(data) != woff2DirectorySize: raise TTLibError("Not a WOFF2 font (not enough data)") sstruct.unpack(woff2DirectoryFormat, data, self) self.tables = OrderedDict() offset = 0 for i in range(self.numTables): entry = self.DirectoryEntry() entry.fromFile(self.file) tag = Tag(entry.tag) self.tables[tag] = entry entry.offset = offset offset += entry.length totalUncompressedSize = offset compressedData = self.file.read(self.totalCompressedSize) decompressedData = brotli.decompress(compressedData) if len(decompressedData) != totalUncompressedSize: raise TTLibError( "unexpected size for decompressed font data: expected %d, found %d" % (totalUncompressedSize, len(decompressedData)) ) self.transformBuffer = BytesIO(decompressedData) self.file.seek(0, 2) if self.length != self.file.tell(): raise TTLibError("reported 'length' doesn't match the actual file size") self.flavorData = WOFF2FlavorData(self) # make empty TTFont to store data while reconstructing tables self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) def __getitem__(self, tag): """Fetch the raw table data. Reconstruct transformed tables.""" entry = self.tables[Tag(tag)] if not hasattr(entry, "data"): if entry.transformed: entry.data = self.reconstructTable(tag) else: entry.data = entry.loadData(self.transformBuffer) return entry.data def reconstructTable(self, tag): """Reconstruct table named 'tag' from transformed data.""" entry = self.tables[Tag(tag)] rawData = entry.loadData(self.transformBuffer) if tag == "glyf": # no need to pad glyph data when reconstructing padding = self.padding if hasattr(self, "padding") else None data = self._reconstructGlyf(rawData, padding) elif tag == "loca": data = self._reconstructLoca() elif tag == "hmtx": data = self._reconstructHmtx(rawData) else: raise TTLibError("transform for table '%s' is unknown" % tag) return data def _reconstructGlyf(self, data, padding=None): """Return recostructed glyf table data, and set the corresponding loca's locations. 
Optionally pad glyph offsets to the specified number of bytes. """ self.ttFont["loca"] = WOFF2LocaTable() glyfTable = self.ttFont["glyf"] = WOFF2GlyfTable() glyfTable.reconstruct(data, self.ttFont) if padding: glyfTable.padding = padding data = glyfTable.compile(self.ttFont) return data def _reconstructLoca(self): """Return reconstructed loca table data.""" if "loca" not in self.ttFont: # make sure glyf is reconstructed first self.tables["glyf"].data = self.reconstructTable("glyf") locaTable = self.ttFont["loca"] data = locaTable.compile(self.ttFont) if len(data) != self.tables["loca"].origLength: raise TTLibError( "reconstructed 'loca' table doesn't match original size: " "expected %d, found %d" % (self.tables["loca"].origLength, len(data)) ) return data def _reconstructHmtx(self, data): """Return reconstructed hmtx table data.""" # Before reconstructing 'hmtx' table we need to parse other tables: # 'glyf' is required for reconstructing the sidebearings from the glyphs' # bounding box; 'hhea' is needed for the numberOfHMetrics field. if "glyf" in self.flavorData.transformedTables: # transformed 'glyf' table is self-contained, thus 'loca' not needed tableDependencies = ("maxp", "hhea", "glyf") else: # decompiling untransformed 'glyf' requires 'loca', which requires 'head' tableDependencies = ("maxp", "head", "hhea", "loca", "glyf") for tag in tableDependencies: self._decompileTable(tag) hmtxTable = self.ttFont["hmtx"] = WOFF2HmtxTable() hmtxTable.reconstruct(data, self.ttFont) data = hmtxTable.compile(self.ttFont) return data def _decompileTable(self, tag): """Decompile table data and store it inside self.ttFont.""" data = self[tag] if self.ttFont.isLoaded(tag): return self.ttFont[tag] tableClass = getTableClass(tag) table = tableClass(tag) self.ttFont.tables[tag] = table table.decompile(data, self.ttFont) class WOFF2Writer(SFNTWriter): flavor = "woff2" def __init__( self, file, numTables, sfntVersion="\000\001\000\000", flavor=None, flavorData=None, ): if not haveBrotli: log.error( "The WOFF2 encoder requires the Brotli Python extension, available at: " "https://github.com/google/brotli" ) raise ImportError("No module named brotli") self.file = file self.numTables = numTables self.sfntVersion = Tag(sfntVersion) self.flavorData = WOFF2FlavorData(data=flavorData) self.directoryFormat = woff2DirectoryFormat self.directorySize = woff2DirectorySize self.DirectoryEntry = WOFF2DirectoryEntry self.signature = Tag("wOF2") self.nextTableOffset = 0 self.transformBuffer = BytesIO() self.tables = OrderedDict() # make empty TTFont to store data while normalising and transforming tables self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) def __setitem__(self, tag, data): """Associate new entry named 'tag' with raw table data.""" if tag in self.tables: raise TTLibError("cannot rewrite '%s' table" % tag) if tag == "DSIG": # always drop DSIG table, since the encoding process can invalidate it self.numTables -= 1 return entry = self.DirectoryEntry() entry.tag = Tag(tag) entry.flags = getKnownTagIndex(entry.tag) # WOFF2 table data are written to disk only on close(), after all tags # have been specified entry.data = data self.tables[tag] = entry def close(self): """All tags must have been specified. 
Now write the table data and directory.""" if len(self.tables) != self.numTables: raise TTLibError( "wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables)) ) if self.sfntVersion in ("\x00\x01\x00\x00", "true"): isTrueType = True elif self.sfntVersion == "OTTO": isTrueType = False else: raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned. # However, the reference WOFF2 implementation still fails to reconstruct # 'unpadded' glyf tables, therefore we need to 'normalise' them. # See: # https://github.com/khaledhosny/ots/issues/60 # https://github.com/google/woff2/issues/15 if ( isTrueType and "glyf" in self.flavorData.transformedTables and "glyf" in self.tables ): self._normaliseGlyfAndLoca(padding=4) self._setHeadTransformFlag() # To pass the legacy OpenType Sanitiser currently included in browsers, # we must sort the table directory and data alphabetically by tag. # See: # https://github.com/google/woff2/pull/3 # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html # # 2023: We rely on this in _transformTables where we expect that # "loca" comes after "glyf" table. self.tables = OrderedDict(sorted(self.tables.items())) self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets() fontData = self._transformTables() compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT) self.totalCompressedSize = len(compressedFont) self.length = self._calcTotalSize() self.majorVersion, self.minorVersion = self._getVersion() self.reserved = 0 directory = self._packTableDirectory() self.file.seek(0) self.file.write(pad(directory + compressedFont, size=4)) self._writeFlavorData() def _normaliseGlyfAndLoca(self, padding=4): """Recompile glyf and loca tables, aligning glyph offsets to multiples of 'padding' size. Update the head table's 'indexToLocFormat' accordingly while compiling loca. """ if self.sfntVersion == "OTTO": return for tag in ("maxp", "head", "loca", "glyf", "fvar"): if tag in self.tables: self._decompileTable(tag) self.ttFont["glyf"].padding = padding for tag in ("glyf", "loca"): self._compileTable(tag) def _setHeadTransformFlag(self): """Set bit 11 of 'head' table flags to indicate that the font has undergone a lossless modifying transform. Re-compile head table data.""" self._decompileTable("head") self.ttFont["head"].flags |= 1 << 11 self._compileTable("head") def _decompileTable(self, tag): """Fetch table data, decompile it, and store it inside self.ttFont.""" tag = Tag(tag) if tag not in self.tables: raise TTLibError("missing required table: %s" % tag) if self.ttFont.isLoaded(tag): return data = self.tables[tag].data if tag == "loca": tableClass = WOFF2LocaTable elif tag == "glyf": tableClass = WOFF2GlyfTable elif tag == "hmtx": tableClass = WOFF2HmtxTable else: tableClass = getTableClass(tag) table = tableClass(tag) self.ttFont.tables[tag] = table table.decompile(data, self.ttFont) def _compileTable(self, tag): """Compile table and store it in its 'data' attribute.""" self.tables[tag].data = self.ttFont[tag].compile(self.ttFont) def _calcSFNTChecksumsLengthsAndOffsets(self): """Compute the 'original' SFNT checksums, lengths and offsets for checksum adjustment calculation. Return the total size of the uncompressed font. 
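        (Editor's note: the running offset below is advanced by
        ``(origLength + 3) & ~3`` so that every table is assumed to start on a
        4-byte boundary, as the sfnt format prescribes.)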
""" offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables) for tag, entry in self.tables.items(): data = entry.data entry.origOffset = offset entry.origLength = len(data) if tag == "head": entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:]) else: entry.checkSum = calcChecksum(data) offset += (entry.origLength + 3) & ~3 return offset def _transformTables(self): """Return transformed font data.""" transformedTables = self.flavorData.transformedTables for tag, entry in self.tables.items(): data = None if tag in transformedTables: data = self.transformTable(tag) if data is not None: entry.transformed = True if data is None: if tag == "glyf": # Currently we always sort table tags so # 'loca' comes after 'glyf'. transformedTables.discard("loca") # pass-through the table data without transformation data = entry.data entry.transformed = False entry.offset = self.nextTableOffset entry.saveData(self.transformBuffer, data) self.nextTableOffset += entry.length self.writeMasterChecksum() fontData = self.transformBuffer.getvalue() return fontData def transformTable(self, tag): """Return transformed table data, or None if some pre-conditions aren't met -- in which case, the non-transformed table data will be used. """ if tag == "loca": data = b"" elif tag == "glyf": for tag in ("maxp", "head", "loca", "glyf"): self._decompileTable(tag) glyfTable = self.ttFont["glyf"] data = glyfTable.transform(self.ttFont) elif tag == "hmtx": if "glyf" not in self.tables: return for tag in ("maxp", "head", "hhea", "loca", "glyf", "hmtx"): self._decompileTable(tag) hmtxTable = self.ttFont["hmtx"] data = hmtxTable.transform(self.ttFont) # can be None else: raise TTLibError("Transform for table '%s' is unknown" % tag) return data def _calcMasterChecksum(self): """Calculate checkSumAdjustment.""" tags = list(self.tables.keys()) checksums = [] for i in range(len(tags)): checksums.append(self.tables[tags[i]].checkSum) # Create a SFNT directory for checksum calculation purposes self.searchRange, self.entrySelector, self.rangeShift = getSearchRange( self.numTables, 16 ) directory = sstruct.pack(sfntDirectoryFormat, self) tables = sorted(self.tables.items()) for tag, entry in tables: sfntEntry = SFNTDirectoryEntry() sfntEntry.tag = entry.tag sfntEntry.checkSum = entry.checkSum sfntEntry.offset = entry.origOffset sfntEntry.length = entry.origLength directory = directory + sfntEntry.toString() directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize assert directory_end == len(directory) checksums.append(calcChecksum(directory)) checksum = sum(checksums) & 0xFFFFFFFF # BiboAfba! 
checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF return checksumadjustment def writeMasterChecksum(self): """Write checkSumAdjustment to the transformBuffer.""" checksumadjustment = self._calcMasterChecksum() self.transformBuffer.seek(self.tables["head"].offset + 8) self.transformBuffer.write(struct.pack(">L", checksumadjustment)) def _calcTotalSize(self): """Calculate total size of WOFF2 font, including any meta- and/or private data.""" offset = self.directorySize for entry in self.tables.values(): offset += len(entry.toString()) offset += self.totalCompressedSize offset = (offset + 3) & ~3 offset = self._calcFlavorDataOffsetsAndSize(offset) return offset def _calcFlavorDataOffsetsAndSize(self, start): """Calculate offsets and lengths for any meta- and/or private data.""" offset = start data = self.flavorData if data.metaData: self.metaOrigLength = len(data.metaData) self.metaOffset = offset self.compressedMetaData = brotli.compress( data.metaData, mode=brotli.MODE_TEXT ) self.metaLength = len(self.compressedMetaData) offset += self.metaLength else: self.metaOffset = self.metaLength = self.metaOrigLength = 0 self.compressedMetaData = b"" if data.privData: # make sure private data is padded to 4-byte boundary offset = (offset + 3) & ~3 self.privOffset = offset self.privLength = len(data.privData) offset += self.privLength else: self.privOffset = self.privLength = 0 return offset def _getVersion(self): """Return the WOFF2 font's (majorVersion, minorVersion) tuple.""" data = self.flavorData if data.majorVersion is not None and data.minorVersion is not None: return data.majorVersion, data.minorVersion else: # if None, return 'fontRevision' from 'head' table if "head" in self.tables: return struct.unpack(">HH", self.tables["head"].data[4:8]) else: return 0, 0 def _packTableDirectory(self): """Return WOFF2 table directory data.""" directory = sstruct.pack(self.directoryFormat, self) for entry in self.tables.values(): directory = directory + entry.toString() return directory def _writeFlavorData(self): """Write metadata and/or private data using appropriate padding.""" compressedMetaData = self.compressedMetaData privData = self.flavorData.privData if compressedMetaData and privData: compressedMetaData = pad(compressedMetaData, size=4) if compressedMetaData: self.file.seek(self.metaOffset) assert self.file.tell() == self.metaOffset self.file.write(compressedMetaData) if privData: self.file.seek(self.privOffset) assert self.file.tell() == self.privOffset self.file.write(privData) def reordersTables(self): return True # -- woff2 directory helpers and cruft woff2DirectoryFormat = """ > # big endian signature: 4s # "wOF2" sfntVersion: 4s length: L # total woff2 file size numTables: H # number of tables reserved: H # set to 0 totalSfntSize: L # uncompressed size totalCompressedSize: L # compressed size majorVersion: H # major version of WOFF file minorVersion: H # minor version of WOFF file metaOffset: L # offset to metadata block metaLength: L # length of compressed metadata metaOrigLength: L # length of uncompressed metadata privOffset: L # offset to private data block privLength: L # length of private data block """ woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat) woff2KnownTags = ( "cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post", "cvt ", "fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT", "EBLC", "gasp", "hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea", "vmtx", "BASE", "GDEF", "GPOS", "GSUB", "EBSC", "JSTF", "MATH", "CBDT", "CBLC", "COLR", "CPAL", "SVG ",
"sbix", "acnt", "avar", "bdat", "bloc", "bsln", "cvar", "fdsc", "feat", "fmtx", "fvar", "gvar", "hsty", "just", "lcar", "mort", "morx", "opbd", "prop", "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill", ) woff2FlagsFormat = """ > # big endian flags: B # table type and flags """ woff2FlagsSize = sstruct.calcsize(woff2FlagsFormat) woff2UnknownTagFormat = """ > # big endian tag: 4s # 4-byte tag (optional) """ woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat) woff2UnknownTagIndex = 0x3F woff2Base128MaxSize = 5 woff2DirectoryEntryMaxSize = ( woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize ) woff2TransformedTableTags = ("glyf", "loca") woff2GlyfTableFormat = """ > # big endian version: H # = 0x0000 optionFlags: H # Bit 0: we have overlapSimpleBitmap[], Bits 1-15: reserved numGlyphs: H # Number of glyphs indexFormat: H # Offset format for loca table nContourStreamSize: L # Size of nContour stream nPointsStreamSize: L # Size of nPoints stream flagStreamSize: L # Size of flag stream glyphStreamSize: L # Size of glyph stream compositeStreamSize: L # Size of composite stream bboxStreamSize: L # Comnined size of bboxBitmap and bboxStream instructionStreamSize: L # Size of instruction stream """ woff2GlyfTableFormatSize = sstruct.calcsize(woff2GlyfTableFormat) bboxFormat = """ > # big endian xMin: h yMin: h xMax: h yMax: h """ woff2OverlapSimpleBitmapFlag = 0x0001 def getKnownTagIndex(tag): """Return index of 'tag' in woff2KnownTags list. Return 63 if not found.""" for i in range(len(woff2KnownTags)): if tag == woff2KnownTags[i]: return i return woff2UnknownTagIndex class WOFF2DirectoryEntry(DirectoryEntry): def fromFile(self, file): pos = file.tell() data = file.read(woff2DirectoryEntryMaxSize) left = self.fromString(data) consumed = len(data) - len(left) file.seek(pos + consumed) def fromString(self, data): if len(data) < 1: raise TTLibError("can't read table 'flags': not enough data") dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self) if self.flags & 0x3F == 0x3F: # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value if len(data) < woff2UnknownTagSize: raise TTLibError("can't read table 'tag': not enough data") dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self) else: # otherwise, tag is derived from a fixed 'Known Tags' table self.tag = woff2KnownTags[self.flags & 0x3F] self.tag = Tag(self.tag) self.origLength, data = unpackBase128(data) self.length = self.origLength if self.transformed: self.length, data = unpackBase128(data) if self.tag == "loca" and self.length != 0: raise TTLibError("the transformLength of the 'loca' table must be 0") # return left over data return data def toString(self): data = bytechr(self.flags) if (self.flags & 0x3F) == 0x3F: data += struct.pack(">4s", self.tag.tobytes()) data += packBase128(self.origLength) if self.transformed: data += packBase128(self.length) return data @property def transformVersion(self): """Return bits 6-7 of table entry's flags, which indicate the preprocessing transformation version number (between 0 and 3). """ return self.flags >> 6 @transformVersion.setter def transformVersion(self, value): assert 0 <= value <= 3 self.flags |= value << 6 @property def transformed(self): """Return True if the table has any transformation, else return False.""" # For all tables in a font, except for 'glyf' and 'loca', the transformation # version 0 indicates the null transform (where the original table data is # passed directly to the Brotli compressor). 
For 'glyf' and 'loca' tables, # transformation version 3 indicates the null transform if self.tag in {"glyf", "loca"}: return self.transformVersion != 3 else: return self.transformVersion != 0 @transformed.setter def transformed(self, booleanValue): # here we assume that a non-null transform means version 0 for 'glyf' and # 'loca' and 1 for every other table (e.g. hmtx); but that may change as # new transformation formats are introduced in the future (if ever). if self.tag in {"glyf", "loca"}: self.transformVersion = 3 if not booleanValue else 0 else: self.transformVersion = int(booleanValue) class WOFF2LocaTable(getTableClass("loca")): """Same as parent class. The only difference is that it attempts to preserve the 'indexFormat' as encoded in the WOFF2 glyf table. """ def __init__(self, tag=None): self.tableTag = Tag(tag or "loca") def compile(self, ttFont): try: max_location = max(self.locations) except AttributeError: self.set([]) max_location = 0 if "glyf" in ttFont and hasattr(ttFont["glyf"], "indexFormat"): # compile loca using the indexFormat specified in the WOFF2 glyf table indexFormat = ttFont["glyf"].indexFormat if indexFormat == 0: if max_location >= 0x20000: raise TTLibError("indexFormat is 0 but local offsets > 0x20000") if not all(l % 2 == 0 for l in self.locations): raise TTLibError( "indexFormat is 0 but local offsets not multiples of 2" ) locations = array.array("H") for i in range(len(self.locations)): locations.append(self.locations[i] // 2) else: locations = array.array("I", self.locations) if sys.byteorder != "big": locations.byteswap() data = locations.tobytes() else: # use the most compact indexFormat given the current glyph offsets data = super(WOFF2LocaTable, self).compile(ttFont) return data class WOFF2GlyfTable(getTableClass("glyf")): """Decoder/Encoder for WOFF2 'glyf' table transform.""" subStreams = ( "nContourStream", "nPointsStream", "flagStream", "glyphStream", "compositeStream", "bboxStream", "instructionStream", ) def __init__(self, tag=None): self.tableTag = Tag(tag or "glyf") def reconstruct(self, data, ttFont): """Decompile transformed 'glyf' data.""" inputDataSize = len(data) if inputDataSize < woff2GlyfTableFormatSize: raise TTLibError("not enough 'glyf' data") dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self) offset = woff2GlyfTableFormatSize for stream in self.subStreams: size = getattr(self, stream + "Size") setattr(self, stream, data[:size]) data = data[size:] offset += size hasOverlapSimpleBitmap = self.optionFlags & woff2OverlapSimpleBitmapFlag self.overlapSimpleBitmap = None if hasOverlapSimpleBitmap: overlapSimpleBitmapSize = (self.numGlyphs + 7) >> 3 self.overlapSimpleBitmap = array.array("B", data[:overlapSimpleBitmapSize]) offset += overlapSimpleBitmapSize if offset != inputDataSize: raise TTLibError( "incorrect size of transformed 'glyf' table: expected %d, received %d bytes" % (offset, inputDataSize) ) bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 bboxBitmap = self.bboxStream[:bboxBitmapSize] self.bboxBitmap = array.array("B", bboxBitmap) self.bboxStream = self.bboxStream[bboxBitmapSize:] self.nContourStream = array.array("h", self.nContourStream) if sys.byteorder != "big": self.nContourStream.byteswap() assert len(self.nContourStream) == self.numGlyphs if "head" in ttFont: ttFont["head"].indexToLocFormat = self.indexFormat try: self.glyphOrder = ttFont.getGlyphOrder() except Exception: self.glyphOrder = None if self.glyphOrder is None: self.glyphOrder = [".notdef"] self.glyphOrder.extend(["glyph%.5d" % i for i in range(1,
self.numGlyphs)]) else: if len(self.glyphOrder) != self.numGlyphs: raise TTLibError( "incorrect glyphOrder: expected %d glyphs, found %d" % (len(self.glyphOrder), self.numGlyphs) ) glyphs = self.glyphs = {} for glyphID, glyphName in enumerate(self.glyphOrder): glyph = self._decodeGlyph(glyphID) glyphs[glyphName] = glyph def transform(self, ttFont): """Return transformed 'glyf' data""" self.numGlyphs = len(self.glyphs) assert len(self.glyphOrder) == self.numGlyphs if "maxp" in ttFont: ttFont["maxp"].numGlyphs = self.numGlyphs self.indexFormat = ttFont["head"].indexToLocFormat for stream in self.subStreams: setattr(self, stream, b"") bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 self.bboxBitmap = array.array("B", [0] * bboxBitmapSize) self.overlapSimpleBitmap = array.array("B", [0] * ((self.numGlyphs + 7) >> 3)) for glyphID in range(self.numGlyphs): try: self._encodeGlyph(glyphID) except NotImplementedError: return None hasOverlapSimpleBitmap = any(self.overlapSimpleBitmap) self.bboxStream = self.bboxBitmap.tobytes() + self.bboxStream for stream in self.subStreams: setattr(self, stream + "Size", len(getattr(self, stream))) self.version = 0 self.optionFlags = 0 if hasOverlapSimpleBitmap: self.optionFlags |= woff2OverlapSimpleBitmapFlag data = sstruct.pack(woff2GlyfTableFormat, self) data += bytesjoin([getattr(self, s) for s in self.subStreams]) if hasOverlapSimpleBitmap: data += self.overlapSimpleBitmap.tobytes() return data def _decodeGlyph(self, glyphID): glyph = getTableModule("glyf").Glyph() glyph.numberOfContours = self.nContourStream[glyphID] if glyph.numberOfContours == 0: return glyph elif glyph.isComposite(): self._decodeComponents(glyph) else: self._decodeCoordinates(glyph) self._decodeOverlapSimpleFlag(glyph, glyphID) self._decodeBBox(glyphID, glyph) return glyph def _decodeComponents(self, glyph): data = self.compositeStream glyph.components = [] more = 1 haveInstructions = 0 while more: component = getTableModule("glyf").GlyphComponent() more, haveInstr, data = component.decompile(data, self) haveInstructions = haveInstructions | haveInstr glyph.components.append(component) self.compositeStream = data if haveInstructions: self._decodeInstructions(glyph) def _decodeCoordinates(self, glyph): data = self.nPointsStream endPtsOfContours = [] endPoint = -1 for i in range(glyph.numberOfContours): ptsOfContour, data = unpack255UShort(data) endPoint += ptsOfContour endPtsOfContours.append(endPoint) glyph.endPtsOfContours = endPtsOfContours self.nPointsStream = data self._decodeTriplets(glyph) self._decodeInstructions(glyph) def _decodeOverlapSimpleFlag(self, glyph, glyphID): if self.overlapSimpleBitmap is None or glyph.numberOfContours <= 0: return byte = glyphID >> 3 bit = glyphID & 7 if self.overlapSimpleBitmap[byte] & (0x80 >> bit): glyph.flags[0] |= _g_l_y_f.flagOverlapSimple def _decodeInstructions(self, glyph): glyphStream = self.glyphStream instructionStream = self.instructionStream instructionLength, glyphStream = unpack255UShort(glyphStream) glyph.program = ttProgram.Program() glyph.program.fromBytecode(instructionStream[:instructionLength]) self.glyphStream = glyphStream self.instructionStream = instructionStream[instructionLength:] def _decodeBBox(self, glyphID, glyph): haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7))) if glyph.isComposite() and not haveBBox: raise TTLibError("no bbox values for composite glyph %d" % glyphID) if haveBBox: dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph) else: glyph.recalcBounds(self) def 
_decodeTriplets(self, glyph): def withSign(flag, baseval): assert 0 <= baseval and baseval < 65536, "integer overflow" return baseval if flag & 1 else -baseval nPoints = glyph.endPtsOfContours[-1] + 1 flagSize = nPoints if flagSize > len(self.flagStream): raise TTLibError("not enough 'flagStream' data") flagsData = self.flagStream[:flagSize] self.flagStream = self.flagStream[flagSize:] flags = array.array("B", flagsData) triplets = array.array("B", self.glyphStream) nTriplets = len(triplets) assert nPoints <= nTriplets x = 0 y = 0 glyph.coordinates = getTableModule("glyf").GlyphCoordinates.zeros(nPoints) glyph.flags = array.array("B") tripletIndex = 0 for i in range(nPoints): flag = flags[i] onCurve = not bool(flag >> 7) flag &= 0x7F if flag < 84: nBytes = 1 elif flag < 120: nBytes = 2 elif flag < 124: nBytes = 3 else: nBytes = 4 assert (tripletIndex + nBytes) <= nTriplets if flag < 10: dx = 0 dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex]) elif flag < 20: dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex]) dy = 0 elif flag < 84: b0 = flag - 20 b1 = triplets[tripletIndex] dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4)) dy = withSign(flag >> 1, 1 + ((b0 & 0x0C) << 2) + (b1 & 0x0F)) elif flag < 120: b0 = flag - 84 dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex]) dy = withSign( flag >> 1, 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1] ) elif flag < 124: b2 = triplets[tripletIndex + 1] dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4)) dy = withSign( flag >> 1, ((b2 & 0x0F) << 8) + triplets[tripletIndex + 2] ) else: dx = withSign( flag, (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1] ) dy = withSign( flag >> 1, (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3], ) tripletIndex += nBytes x += dx y += dy glyph.coordinates[i] = (x, y) glyph.flags.append(int(onCurve)) bytesConsumed = tripletIndex self.glyphStream = self.glyphStream[bytesConsumed:] def _encodeGlyph(self, glyphID): glyphName = self.getGlyphName(glyphID) glyph = self[glyphName] self.nContourStream += struct.pack(">h", glyph.numberOfContours) if glyph.numberOfContours == 0: return elif glyph.isComposite(): self._encodeComponents(glyph) elif glyph.isVarComposite(): raise NotImplementedError else: self._encodeCoordinates(glyph) self._encodeOverlapSimpleFlag(glyph, glyphID) self._encodeBBox(glyphID, glyph) def _encodeComponents(self, glyph): lastcomponent = len(glyph.components) - 1 more = 1 haveInstructions = 0 for i in range(len(glyph.components)): if i == lastcomponent: haveInstructions = hasattr(glyph, "program") more = 0 component = glyph.components[i] self.compositeStream += component.compile(more, haveInstructions, self) if haveInstructions: self._encodeInstructions(glyph) def _encodeCoordinates(self, glyph): lastEndPoint = -1 if _g_l_y_f.flagCubic in glyph.flags: raise NotImplementedError for endPoint in glyph.endPtsOfContours: ptsOfContour = endPoint - lastEndPoint self.nPointsStream += pack255UShort(ptsOfContour) lastEndPoint = endPoint self._encodeTriplets(glyph) self._encodeInstructions(glyph) def _encodeOverlapSimpleFlag(self, glyph, glyphID): if glyph.numberOfContours <= 0: return if glyph.flags[0] & _g_l_y_f.flagOverlapSimple: byte = glyphID >> 3 bit = glyphID & 7 self.overlapSimpleBitmap[byte] |= 0x80 >> bit def _encodeInstructions(self, glyph): instructions = glyph.program.getBytecode() self.glyphStream += pack255UShort(len(instructions)) self.instructionStream += instructions def _encodeBBox(self, glyphID, glyph): 
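# An explicit bbox entry is only written when it cannot be recomputed from
# the glyph data itself: composite glyphs always get one, while simple
# glyphs get one only if their stored bbox differs from the bounds
# calculated from their coordinates; bboxBitmap marks which glyphs have an
# entry in bboxStream.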
assert glyph.numberOfContours != 0, "empty glyph has no bbox" if not glyph.isComposite(): # for simple glyphs, compare the encoded bounding box info with the calculated # values, and if they match omit the bounding box info currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax calculatedBBox = calcIntBounds(glyph.coordinates) if currentBBox == calculatedBBox: return self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7) self.bboxStream += sstruct.pack(bboxFormat, glyph) def _encodeTriplets(self, glyph): assert len(glyph.coordinates) == len(glyph.flags) coordinates = glyph.coordinates.copy() coordinates.absoluteToRelative() flags = array.array("B") triplets = array.array("B") for i in range(len(coordinates)): onCurve = glyph.flags[i] & _g_l_y_f.flagOnCurve x, y = coordinates[i] absX = abs(x) absY = abs(y) onCurveBit = 0 if onCurve else 128 xSignBit = 0 if (x < 0) else 1 ySignBit = 0 if (y < 0) else 1 xySignBits = xSignBit + 2 * ySignBit if x == 0 and absY < 1280: flags.append(onCurveBit + ((absY & 0xF00) >> 7) + ySignBit) triplets.append(absY & 0xFF) elif y == 0 and absX < 1280: flags.append(onCurveBit + 10 + ((absX & 0xF00) >> 7) + xSignBit) triplets.append(absX & 0xFF) elif absX < 65 and absY < 65: flags.append( onCurveBit + 20 + ((absX - 1) & 0x30) + (((absY - 1) & 0x30) >> 2) + xySignBits ) triplets.append((((absX - 1) & 0xF) << 4) | ((absY - 1) & 0xF)) elif absX < 769 and absY < 769: flags.append( onCurveBit + 84 + 12 * (((absX - 1) & 0x300) >> 8) + (((absY - 1) & 0x300) >> 6) + xySignBits ) triplets.append((absX - 1) & 0xFF) triplets.append((absY - 1) & 0xFF) elif absX < 4096 and absY < 4096: flags.append(onCurveBit + 120 + xySignBits) triplets.append(absX >> 4) triplets.append(((absX & 0xF) << 4) | (absY >> 8)) triplets.append(absY & 0xFF) else: flags.append(onCurveBit + 124 + xySignBits) triplets.append(absX >> 8) triplets.append(absX & 0xFF) triplets.append(absY >> 8) triplets.append(absY & 0xFF) self.flagStream += flags.tobytes() self.glyphStream += triplets.tobytes() class WOFF2HmtxTable(getTableClass("hmtx")): def __init__(self, tag=None): self.tableTag = Tag(tag or "hmtx") def reconstruct(self, data, ttFont): (flags,) = struct.unpack(">B", data[:1]) data = data[1:] if flags & 0b11111100 != 0: raise TTLibError("Bits 2-7 of '%s' flags are reserved" % self.tableTag) # When bit 0 is _not_ set, the lsb[] array is present hasLsbArray = flags & 1 == 0 # When bit 1 is _not_ set, the leftSideBearing[] array is present hasLeftSideBearingArray = flags & 2 == 0 if hasLsbArray and hasLeftSideBearingArray: raise TTLibError( "either bits 0 or 1 (or both) must be set in transformed '%s' flags" % self.tableTag ) glyfTable = ttFont["glyf"] headerTable = ttFont["hhea"] glyphOrder = glyfTable.glyphOrder numGlyphs = len(glyphOrder) numberOfHMetrics = min(int(headerTable.numberOfHMetrics), numGlyphs) assert len(data) >= 2 * numberOfHMetrics advanceWidthArray = array.array("H", data[: 2 * numberOfHMetrics]) if sys.byteorder != "big": advanceWidthArray.byteswap() data = data[2 * numberOfHMetrics :] if hasLsbArray: assert len(data) >= 2 * numberOfHMetrics lsbArray = array.array("h", data[: 2 * numberOfHMetrics]) if sys.byteorder != "big": lsbArray.byteswap() data = data[2 * numberOfHMetrics :] else: # compute (proportional) glyphs' lsb from their xMin lsbArray = array.array("h") for i, glyphName in enumerate(glyphOrder): if i >= numberOfHMetrics: break glyph = glyfTable[glyphName] xMin = getattr(glyph, "xMin", 0) lsbArray.append(xMin) numberOfSideBearings = numGlyphs - numberOfHMetrics if
hasLeftSideBearingArray: assert len(data) >= 2 * numberOfSideBearings leftSideBearingArray = array.array("h", data[: 2 * numberOfSideBearings]) if sys.byteorder != "big": leftSideBearingArray.byteswap() data = data[2 * numberOfSideBearings :] else: # compute (monospaced) glyphs' leftSideBearing from their xMin leftSideBearingArray = array.array("h") for i, glyphName in enumerate(glyphOrder): if i < numberOfHMetrics: continue glyph = glyfTable[glyphName] xMin = getattr(glyph, "xMin", 0) leftSideBearingArray.append(xMin) if data: raise TTLibError("too much '%s' table data" % self.tableTag) self.metrics = {} for i in range(numberOfHMetrics): glyphName = glyphOrder[i] advanceWidth, lsb = advanceWidthArray[i], lsbArray[i] self.metrics[glyphName] = (advanceWidth, lsb) lastAdvance = advanceWidthArray[-1] for i in range(numberOfSideBearings): glyphName = glyphOrder[i + numberOfHMetrics] self.metrics[glyphName] = (lastAdvance, leftSideBearingArray[i]) def transform(self, ttFont): glyphOrder = ttFont.getGlyphOrder() glyf = ttFont["glyf"] hhea = ttFont["hhea"] numberOfHMetrics = hhea.numberOfHMetrics # check if any of the proportional glyphs has left sidebearings that # differ from their xMin bounding box values. hasLsbArray = False for i in range(numberOfHMetrics): glyphName = glyphOrder[i] lsb = self.metrics[glyphName][1] if lsb != getattr(glyf[glyphName], "xMin", 0): hasLsbArray = True break # do the same for the monospaced glyphs (if any) at the end of hmtx table hasLeftSideBearingArray = False for i in range(numberOfHMetrics, len(glyphOrder)): glyphName = glyphOrder[i] lsb = self.metrics[glyphName][1] if lsb != getattr(glyf[glyphName], "xMin", 0): hasLeftSideBearingArray = True break # if we need to encode both sidebearings arrays, then no transformation is # applicable, and we must use the untransformed hmtx data if hasLsbArray and hasLeftSideBearingArray: return # set bit 0 and 1 when the respective arrays are _not_ present flags = 0 if not hasLsbArray: flags |= 1 << 0 if not hasLeftSideBearingArray: flags |= 1 << 1 data = struct.pack(">B", flags) advanceWidthArray = array.array( "H", [ self.metrics[glyphName][0] for i, glyphName in enumerate(glyphOrder) if i < numberOfHMetrics ], ) if sys.byteorder != "big": advanceWidthArray.byteswap() data += advanceWidthArray.tobytes() if hasLsbArray: lsbArray = array.array( "h", [ self.metrics[glyphName][1] for i, glyphName in enumerate(glyphOrder) if i < numberOfHMetrics ], ) if sys.byteorder != "big": lsbArray.byteswap() data += lsbArray.tobytes() if hasLeftSideBearingArray: leftSideBearingArray = array.array( "h", [ self.metrics[glyphOrder[i]][1] for i in range(numberOfHMetrics, len(glyphOrder)) ], ) if sys.byteorder != "big": leftSideBearingArray.byteswap() data += leftSideBearingArray.tobytes() return data class WOFF2FlavorData(WOFFFlavorData): Flavor = "woff2" def __init__(self, reader=None, data=None, transformedTables=None): """Data class that holds the WOFF2 header major/minor version, any metadata or private data (as bytes strings), and the set of table tags that have transformations applied (if reader is not None), or will have once the WOFF2 font is compiled. Args: reader: an SFNTReader (or subclass) object to read flavor data from. data: another WOFFFlavorData object to initialise data from. transformedTables: set of strings containing table tags to be transformed. Raises: ImportError if the brotli module is not installed. 
NOTE: The 'reader' argument, on the one hand, and the 'data' and 'transformedTables' arguments, on the other hand, are mutually exclusive. """ if not haveBrotli: raise ImportError("No module named brotli") if reader is not None: if data is not None: raise TypeError("'reader' and 'data' arguments are mutually exclusive") if transformedTables is not None: raise TypeError( "'reader' and 'transformedTables' arguments are mutually exclusive" ) if transformedTables is not None and ( "glyf" in transformedTables and "loca" not in transformedTables or "loca" in transformedTables and "glyf" not in transformedTables ): raise ValueError("'glyf' and 'loca' must be transformed (or not) together") super(WOFF2FlavorData, self).__init__(reader=reader) if reader: transformedTables = [ tag for tag, entry in reader.tables.items() if entry.transformed ] elif data: self.majorVersion = data.majorVersion self.minorVersion = data.minorVersion self.metaData = data.metaData self.privData = data.privData if transformedTables is None and hasattr(data, "transformedTables"): transformedTables = data.transformedTables if transformedTables is None: transformedTables = woff2TransformedTableTags self.transformedTables = set(transformedTables) def _decompress(self, rawData): return brotli.decompress(rawData) def unpackBase128(data): r"""Read one to five bytes from UIntBase128-encoded input string, and return a tuple containing the decoded integer plus any leftover data. >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00") True >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295 True >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): File "<stdin>", line 1, in ? TTLibError: UIntBase128 value must not start with leading zeros >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): File "<stdin>", line 1, in ? TTLibError: UIntBase128-encoded sequence is longer than 5 bytes >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): File "<stdin>", line 1, in ? TTLibError: UIntBase128 value exceeds 2**32-1 """ if len(data) == 0: raise TTLibError("not enough data to unpack UIntBase128") result = 0 if byteord(data[0]) == 0x80: # font must be rejected if UIntBase128 value starts with 0x80 raise TTLibError("UIntBase128 value must not start with leading zeros") for i in range(woff2Base128MaxSize): if len(data) == 0: raise TTLibError("not enough data to unpack UIntBase128") code = byteord(data[0]) data = data[1:] # if any of the top seven bits are set then we're about to overflow if result & 0xFE000000: raise TTLibError("UIntBase128 value exceeds 2**32-1") # set current value = old value times 128 bitwise-or (byte bitwise-and 127) result = (result << 7) | (code & 0x7F) # repeat until the most significant bit of byte is false if (code & 0x80) == 0: # return result plus left over data return result, data # make sure not to exceed the size bound raise TTLibError("UIntBase128-encoded sequence is longer than 5 bytes") def base128Size(n): """Return the length in bytes of a UIntBase128-encoded sequence with value n. >>> base128Size(0) 1 >>> base128Size(24567) 3 >>> base128Size(2**32-1) 5 """ assert n >= 0 size = 1 while n >= 128: size += 1 n >>= 7 return size def packBase128(n): r"""Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of bytes using UIntBase128 variable-length encoding. Produce the shortest possible encoding.
>>> packBase128(63) == b"\x3f" True >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f' True """ if n < 0 or n >= 2**32: raise TTLibError("UIntBase128 format requires 0 <= integer <= 2**32-1") data = b"" size = base128Size(n) for i in range(size): b = (n >> (7 * (size - i - 1))) & 0x7F if i < size - 1: b |= 0x80 data += struct.pack("B", b) return data def unpack255UShort(data): """Read one to three bytes from 255UInt16-encoded input string, and return a tuple containing the decoded integer plus any leftover data. >>> unpack255UShort(bytechr(252))[0] 252 Note that some numbers (e.g. 506) can have multiple encodings: >>> unpack255UShort(struct.pack("BB", 254, 0))[0] 506 >>> unpack255UShort(struct.pack("BB", 255, 253))[0] 506 >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0] 506 """ code = byteord(data[:1]) data = data[1:] if code == 253: # read two more bytes as an unsigned short if len(data) < 2: raise TTLibError("not enough data to unpack 255UInt16") (result,) = struct.unpack(">H", data[:2]) data = data[2:] elif code == 254: # read another byte, plus 253 * 2 if len(data) == 0: raise TTLibError("not enough data to unpack 255UInt16") result = byteord(data[:1]) result += 506 data = data[1:] elif code == 255: # read another byte, plus 253 if len(data) == 0: raise TTLibError("not enough data to unpack 255UInt16") result = byteord(data[:1]) result += 253 data = data[1:] else: # leave as is if lower than 253 result = code # return result plus left over data return result, data def pack255UShort(value): r"""Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring using 255UInt16 variable-length encoding. >>> pack255UShort(252) == b'\xfc' True >>> pack255UShort(506) == b'\xfe\x00' True >>> pack255UShort(762) == b'\xfd\x02\xfa' True """ if value < 0 or value > 0xFFFF: raise TTLibError("255UInt16 format requires 0 <= integer <= 65535") if value < 253: return struct.pack(">B", value) elif value < 506: return struct.pack(">BB", 255, value - 253) elif value < 762: return struct.pack(">BB", 254, value - 506) else: return struct.pack(">BH", 253, value) def compress(input_file, output_file, transform_tables=None): """Compress OpenType font to WOFF2. Args: input_file: a file path, file or file-like object (open in binary mode) containing an OpenType font (either CFF- or TrueType-flavored). output_file: a file path, file or file-like object where to save the compressed WOFF2 font. transform_tables: Optional[Iterable[str]]: a set of table tags for which to enable preprocessing transformations. By default, only 'glyf' and 'loca' tables are transformed. An empty set means disable all transformations. """ log.info("Processing %s => %s" % (input_file, output_file)) font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False) font.flavor = "woff2" if transform_tables is not None: font.flavorData = WOFF2FlavorData( data=font.flavorData, transformedTables=transform_tables ) font.save(output_file, reorderTables=False) def decompress(input_file, output_file): """Decompress WOFF2 font to OpenType font. Args: input_file: a file path, file or file-like object (open in binary mode) containing a compressed WOFF2 font. output_file: a file path, file or file-like object where to save the decompressed OpenType font. 
""" log.info("Processing %s => %s" % (input_file, output_file)) font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False) font.flavor = None font.flavorData = None font.save(output_file, reorderTables=True) def main(args=None): """Compress and decompress WOFF2 fonts""" import argparse from fontTools import configLogger from fontTools.ttx import makeOutputFileName class _HelpAction(argparse._HelpAction): def __call__(self, parser, namespace, values, option_string=None): subparsers_actions = [ action for action in parser._actions if isinstance(action, argparse._SubParsersAction) ] for subparsers_action in subparsers_actions: for choice, subparser in subparsers_action.choices.items(): print(subparser.format_help()) parser.exit() class _NoGlyfTransformAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): namespace.transform_tables.difference_update({"glyf", "loca"}) class _HmtxTransformAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): namespace.transform_tables.add("hmtx") parser = argparse.ArgumentParser( prog="fonttools ttLib.woff2", description=main.__doc__, add_help=False ) parser.add_argument( "-h", "--help", action=_HelpAction, help="show this help message and exit" ) parser_group = parser.add_subparsers(title="sub-commands") parser_compress = parser_group.add_parser( "compress", description="Compress a TTF or OTF font to WOFF2" ) parser_decompress = parser_group.add_parser( "decompress", description="Decompress a WOFF2 font to OTF" ) for subparser in (parser_compress, parser_decompress): group = subparser.add_mutually_exclusive_group(required=False) group.add_argument( "-v", "--verbose", action="store_true", help="print more messages to console", ) group.add_argument( "-q", "--quiet", action="store_true", help="do not print messages to console", ) parser_compress.add_argument( "input_file", metavar="INPUT", help="the input OpenType font (.ttf or .otf)", ) parser_decompress.add_argument( "input_file", metavar="INPUT", help="the input WOFF2 font", ) parser_compress.add_argument( "-o", "--output-file", metavar="OUTPUT", help="the output WOFF2 font", ) parser_decompress.add_argument( "-o", "--output-file", metavar="OUTPUT", help="the output OpenType font", ) transform_group = parser_compress.add_argument_group() transform_group.add_argument( "--no-glyf-transform", dest="transform_tables", nargs=0, action=_NoGlyfTransformAction, help="Do not transform glyf (and loca) tables", ) transform_group.add_argument( "--hmtx-transform", dest="transform_tables", nargs=0, action=_HmtxTransformAction, help="Enable optional transformation for 'hmtx' table", ) parser_compress.set_defaults( subcommand=compress, transform_tables={"glyf", "loca"}, ) parser_decompress.set_defaults(subcommand=decompress) options = vars(parser.parse_args(args)) subcommand = options.pop("subcommand", None) if not subcommand: parser.print_help() return quiet = options.pop("quiet") verbose = options.pop("verbose") configLogger( level=("ERROR" if quiet else "DEBUG" if verbose else "INFO"), ) if not options["output_file"]: if subcommand is compress: extension = ".woff2" elif subcommand is decompress: # choose .ttf/.otf file extension depending on sfntVersion with open(options["input_file"], "rb") as f: f.seek(4) # skip 'wOF2' signature sfntVersion = f.read(4) assert len(sfntVersion) == 4, "not enough data" extension = ".otf" if sfntVersion == b"OTTO" else ".ttf" else: raise AssertionError(subcommand) options["output_file"] = makeOutputFileName( 
options["input_file"], outputDir=None, extension=extension ) try: subcommand(**options) except TTLibError as e: parser.error(e) if __name__ == "__main__": sys.exit(main()) PKaZZZ��XX"fontTools/ttLib/tables/B_A_S_E_.pyfrom .otBase import BaseTTXConverter class table_B_A_S_E_(BaseTTXConverter): pass PKaZZZ|ɇ��,fontTools/ttLib/tables/BitmapGlyphMetrics.py# Since bitmap glyph metrics are shared between EBLC and EBDT # this class gets its own python file. from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval import logging log = logging.getLogger(__name__) bigGlyphMetricsFormat = """ > # big endian height: B width: B horiBearingX: b horiBearingY: b horiAdvance: B vertBearingX: b vertBearingY: b vertAdvance: B """ smallGlyphMetricsFormat = """ > # big endian height: B width: B BearingX: b BearingY: b Advance: B """ class BitmapGlyphMetrics(object): def toXML(self, writer, ttFont): writer.begintag(self.__class__.__name__) writer.newline() for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]: writer.simpletag(metricName, value=getattr(self, metricName)) writer.newline() writer.endtag(self.__class__.__name__) writer.newline() def fromXML(self, name, attrs, content, ttFont): metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1]) for element in content: if not isinstance(element, tuple): continue name, attrs, content = element # Make sure this is a metric that is needed by GlyphMetrics. if name in metricNames: vars(self)[name] = safeEval(attrs["value"]) else: log.warning( "unknown name '%s' being ignored in %s.", name, self.__class__.__name__, ) class BigGlyphMetrics(BitmapGlyphMetrics): binaryFormat = bigGlyphMetricsFormat class SmallGlyphMetrics(BitmapGlyphMetrics): binaryFormat = smallGlyphMetricsFormat PKaZZZ��'   "fontTools/ttLib/tables/C_B_D_T_.py# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Matt Fontaine from fontTools.misc.textTools import bytesjoin from fontTools.misc import sstruct from . import E_B_D_T_ from .BitmapGlyphMetrics import ( BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat, ) from .E_B_D_T_ import ( BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin, ) import struct class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_): # Change the data locator table being referenced. locatorName = "CBLC" # Modify the format class accessor for color bitmap use. def getImageFormatClass(self, imageFormat): try: return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat) except KeyError: return cbdt_bitmap_classes[imageFormat] # Helper method for removing export features not supported by color bitmaps. # Write data in the parent class will default to raw if an option is unsupported. def _removeUnsupportedForColor(dataFunctions): dataFunctions = dict(dataFunctions) del dataFunctions["row"] return dataFunctions class ColorBitmapGlyph(BitmapGlyph): fileExtension = ".png" xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions) class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph): def decompile(self): self.metrics = SmallGlyphMetrics() dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) (dataLen,) = struct.unpack(">L", data[:4]) data = data[4:] # For the image data cut it to the size specified by dataLen. 
assert dataLen <= len(data), "Data overrun in format 17" self.imageData = data[:dataLen] def compile(self, ttFont): dataList = [] dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) dataList.append(struct.pack(">L", len(self.imageData))) dataList.append(self.imageData) return bytesjoin(dataList) class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph): def decompile(self): self.metrics = BigGlyphMetrics() dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) (dataLen,) = struct.unpack(">L", data[:4]) data = data[4:] # For the image data cut it to the size specified by dataLen. assert dataLen <= len(data), "Data overrun in format 18" self.imageData = data[:dataLen] def compile(self, ttFont): dataList = [] dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) dataList.append(struct.pack(">L", len(self.imageData))) dataList.append(self.imageData) return bytesjoin(dataList) class cbdt_bitmap_format_19(ColorBitmapGlyph): def decompile(self): (dataLen,) = struct.unpack(">L", self.data[:4]) data = self.data[4:] assert dataLen <= len(data), "Data overrun in format 19" self.imageData = data[:dataLen] def compile(self, ttFont): return struct.pack(">L", len(self.imageData)) + self.imageData # Dict for CBDT extended formats. cbdt_bitmap_classes = { 17: cbdt_bitmap_format_17, 18: cbdt_bitmap_format_18, 19: cbdt_bitmap_format_19, } PKaZZZ)�Ƽ�"fontTools/ttLib/tables/C_B_L_C_.py# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Matt Fontaine from . import E_B_L_C_ class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_): dependencies = ["CBDT"] PKaZZZ��b$�� fontTools/ttLib/tables/C_F_F_.pyfrom io import BytesIO from fontTools import cffLib from . import DefaultTable class table_C_F_F_(DefaultTable.DefaultTable): def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.cff = cffLib.CFFFontSet() self._gaveGlyphOrder = False def decompile(self, data, otFont): self.cff.decompile(BytesIO(data), otFont, isCFF2=False) assert len(self.cff) == 1, "can't deal with multi-font CFF tables." def compile(self, otFont): f = BytesIO() self.cff.compile(f, otFont, isCFF2=False) return f.getvalue() def haveGlyphNames(self): if hasattr(self.cff[self.cff.fontNames[0]], "ROS"): return False # CID-keyed font else: return True def getGlyphOrder(self): if self._gaveGlyphOrder: from fontTools import ttLib raise ttLib.TTLibError("illegal use of getGlyphOrder()") self._gaveGlyphOrder = True return self.cff[self.cff.fontNames[0]].getGlyphOrder() def setGlyphOrder(self, glyphOrder): pass # XXX # self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder) def toXML(self, writer, otFont): self.cff.toXML(writer) def fromXML(self, name, attrs, content, otFont): if not hasattr(self, "cff"): self.cff = cffLib.CFFFontSet() self.cff.fromXML(name, attrs, content, otFont) PKaZZZI���"fontTools/ttLib/tables/C_F_F__2.pyfrom io import BytesIO from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_ class table_C_F_F__2(table_C_F_F_): def decompile(self, data, otFont): self.cff.decompile(BytesIO(data), otFont, isCFF2=True) assert len(self.cff) == 1, "can't deal with multi-font CFF tables." def compile(self, otFont): f = BytesIO() self.cff.compile(f, otFont, isCFF2=True) return f.getvalue() PKaZZZH��X]]"fontTools/ttLib/tables/C_O_L_R_.py# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod from fontTools.misc.textTools import safeEval from .
import DefaultTable class table_C_O_L_R_(DefaultTable.DefaultTable): """This table is structured so that you can treat it like a dictionary keyed by glyph name. ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph. ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph. """ @staticmethod def _decompileColorLayersV0(table): if not table.LayerRecordArray: return {} colorLayerLists = {} layerRecords = table.LayerRecordArray.LayerRecord numLayerRecords = len(layerRecords) for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord: baseGlyph = baseRec.BaseGlyph firstLayerIndex = baseRec.FirstLayerIndex numLayers = baseRec.NumLayers assert firstLayerIndex + numLayers <= numLayerRecords layers = [] for i in range(firstLayerIndex, firstLayerIndex + numLayers): layerRec = layerRecords[i] layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex)) colorLayerLists[baseGlyph] = layers return colorLayerLists def _toOTTable(self, ttFont): from . import otTables from fontTools.colorLib.builder import populateCOLRv0 tableClass = getattr(otTables, self.tableTag) table = tableClass() table.Version = self.version populateCOLRv0( table, { baseGlyph: [(layer.name, layer.colorID) for layer in layers] for baseGlyph, layers in self.ColorLayers.items() }, glyphMap=ttFont.getReverseGlyphMap(rebuild=True), ) return table def decompile(self, data, ttFont): from .otBase import OTTableReader from . import otTables # We use otData to decompile, but we adapt the decompiled otTables to the # existing COLR v0 API for backward compatibility. reader = OTTableReader(data, tableTag=self.tableTag) tableClass = getattr(otTables, self.tableTag) table = tableClass() table.decompile(reader, ttFont) self.version = table.Version if self.version == 0: self.ColorLayers = self._decompileColorLayersV0(table) else: # for new versions, keep the raw otTables around self.table = table def compile(self, ttFont): from .otBase import OTTableWriter if hasattr(self, "table"): table = self.table else: table = self._toOTTable(ttFont) writer = OTTableWriter(tableTag=self.tableTag) table.compile(writer, ttFont) return writer.getAllData() def toXML(self, writer, ttFont): if hasattr(self, "table"): self.table.toXML2(writer, ttFont) else: writer.simpletag("version", value=self.version) writer.newline() for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID): writer.begintag("ColorGlyph", name=baseGlyph) writer.newline() for layer in self.ColorLayers[baseGlyph]: layer.toXML(writer, ttFont) writer.endtag("ColorGlyph") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "version": # old COLR v0 API setattr(self, name, safeEval(attrs["value"])) elif name == "ColorGlyph": if not hasattr(self, "ColorLayers"): self.ColorLayers = {} glyphName = attrs["name"] for element in content: if isinstance(element, str): continue layers = [] for element in content: if isinstance(element, str): continue layer = LayerRecord() layer.fromXML(element[0], element[1], element[2], ttFont) layers.append(layer) self.ColorLayers[glyphName] = layers else: # new COLR v1 API from . 
import otTables if not hasattr(self, "table"): tableClass = getattr(otTables, self.tableTag) self.table = tableClass() self.table.fromXML(name, attrs, content, ttFont) self.table.populateDefaults() self.version = self.table.Version def __getitem__(self, glyphName): if not isinstance(glyphName, str): raise TypeError(f"expected str, found {type(glyphName).__name__}") return self.ColorLayers[glyphName] def __setitem__(self, glyphName, value): if not isinstance(glyphName, str): raise TypeError(f"expected str, found {type(glyphName).__name__}") if value is not None: self.ColorLayers[glyphName] = value elif glyphName in self.ColorLayers: del self.ColorLayers[glyphName] def __delitem__(self, glyphName): del self.ColorLayers[glyphName] class LayerRecord(object): def __init__(self, name=None, colorID=None): self.name = name self.colorID = colorID def toXML(self, writer, ttFont): writer.simpletag("layer", name=self.name, colorID=self.colorID) writer.newline() def fromXML(self, eltname, attrs, content, ttFont): for name, value in attrs.items(): if name == "name": setattr(self, name, value) else: setattr(self, name, safeEval(value)) PKaZZZ6���o-o-"fontTools/ttLib/tables/C_P_A_L_.py# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): Behdad Esfahbod from fontTools.misc.textTools import bytesjoin, safeEval from . import DefaultTable import array from collections import namedtuple import struct import sys class table_C_P_A_L_(DefaultTable.DefaultTable): NO_NAME_ID = 0xFFFF DEFAULT_PALETTE_TYPE = 0 def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.palettes = [] self.paletteTypes = [] self.paletteLabels = [] self.paletteEntryLabels = [] def decompile(self, data, ttFont): ( self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord, ) = struct.unpack(">HHHHL", data[:12]) assert ( self.version <= 1 ), "Version of CPAL table is higher than I know how to handle" self.palettes = [] pos = 12 for i in range(numPalettes): startIndex = struct.unpack(">H", data[pos : pos + 2])[0] assert startIndex + self.numPaletteEntries <= numColorRecords pos += 2 palette = [] ppos = goffsetFirstColorRecord + startIndex * 4 for j in range(self.numPaletteEntries): palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4]))) ppos += 4 self.palettes.append(palette) if self.version == 0: offsetToPaletteTypeArray = 0 offsetToPaletteLabelArray = 0 offsetToPaletteEntryLabelArray = 0 else: pos = 12 + numPalettes * 2 ( offsetToPaletteTypeArray, offsetToPaletteLabelArray, offsetToPaletteEntryLabelArray, ) = struct.unpack(">LLL", data[pos : pos + 12]) self.paletteTypes = self._decompileUInt32Array( data, offsetToPaletteTypeArray, numPalettes, default=self.DEFAULT_PALETTE_TYPE, ) self.paletteLabels = self._decompileUInt16Array( data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID ) self.paletteEntryLabels = self._decompileUInt16Array( data, offsetToPaletteEntryLabelArray, self.numPaletteEntries, default=self.NO_NAME_ID, ) def _decompileUInt16Array(self, data, offset, numElements, default=0): if offset == 0: return [default] * numElements result = array.array("H", data[offset : offset + 2 * numElements]) if sys.byteorder != "big": result.byteswap() assert len(result) == numElements, result return result.tolist() def _decompileUInt32Array(self, data, offset, numElements, default=0): if offset == 0: return [default] * numElements result = array.array("I", data[offset : offset + 4 * numElements]) if sys.byteorder != "big": 
result.byteswap() assert len(result) == numElements, result return result.tolist() def compile(self, ttFont): colorRecordIndices, colorRecords = self._compileColorRecords() paletteTypes = self._compilePaletteTypes() paletteLabels = self._compilePaletteLabels() paletteEntryLabels = self._compilePaletteEntryLabels() numColorRecords = len(colorRecords) // 4 offsetToFirstColorRecord = 12 + len(colorRecordIndices) if self.version >= 1: offsetToFirstColorRecord += 12 header = struct.pack( ">HHHHL", self.version, self.numPaletteEntries, len(self.palettes), numColorRecords, offsetToFirstColorRecord, ) if self.version == 0: dataList = [header, colorRecordIndices, colorRecords] else: pos = offsetToFirstColorRecord + len(colorRecords) if len(paletteTypes) == 0: offsetToPaletteTypeArray = 0 else: offsetToPaletteTypeArray = pos pos += len(paletteTypes) if len(paletteLabels) == 0: offsetToPaletteLabelArray = 0 else: offsetToPaletteLabelArray = pos pos += len(paletteLabels) if len(paletteEntryLabels) == 0: offsetToPaletteEntryLabelArray = 0 else: offsetToPaletteEntryLabelArray = pos pos += len(paletteLabels) header1 = struct.pack( ">LLL", offsetToPaletteTypeArray, offsetToPaletteLabelArray, offsetToPaletteEntryLabelArray, ) dataList = [ header, colorRecordIndices, header1, colorRecords, paletteTypes, paletteLabels, paletteEntryLabels, ] return bytesjoin(dataList) def _compilePalette(self, palette): assert len(palette) == self.numPaletteEntries pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha) return bytesjoin([pack(color) for color in palette]) def _compileColorRecords(self): colorRecords, colorRecordIndices, pool = [], [], {} for palette in self.palettes: packedPalette = self._compilePalette(palette) if packedPalette in pool: index = pool[packedPalette] else: index = len(colorRecords) colorRecords.append(packedPalette) pool[packedPalette] = index colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries)) return bytesjoin(colorRecordIndices), bytesjoin(colorRecords) def _compilePaletteTypes(self): if self.version == 0 or not any(self.paletteTypes): return b"" assert len(self.paletteTypes) == len(self.palettes) result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes]) assert len(result) == 4 * len(self.palettes) return result def _compilePaletteLabels(self): if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels): return b"" assert len(self.paletteLabels) == len(self.palettes) result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels]) assert len(result) == 2 * len(self.palettes) return result def _compilePaletteEntryLabels(self): if self.version == 0 or all( l == self.NO_NAME_ID for l in self.paletteEntryLabels ): return b"" assert len(self.paletteEntryLabels) == self.numPaletteEntries result = bytesjoin( [struct.pack(">H", label) for label in self.paletteEntryLabels] ) assert len(result) == 2 * self.numPaletteEntries return result def toXML(self, writer, ttFont): numPalettes = len(self.palettes) paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)} paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)} writer.simpletag("version", value=self.version) writer.newline() writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) writer.newline() for index, palette in enumerate(self.palettes): attrs = {"index": index} paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE) paletteLabel = paletteLabels.get(index, self.NO_NAME_ID) if self.version > 0 and 
paletteLabel != self.NO_NAME_ID: attrs["label"] = paletteLabel if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE: attrs["type"] = paletteType writer.begintag("palette", **attrs) writer.newline() if ( self.version > 0 and paletteLabel != self.NO_NAME_ID and ttFont and "name" in ttFont ): name = ttFont["name"].getDebugName(paletteLabel) if name is not None: writer.comment(name) writer.newline() assert len(palette) == self.numPaletteEntries for cindex, color in enumerate(palette): color.toXML(writer, ttFont, cindex) writer.endtag("palette") writer.newline() if self.version > 0 and not all( l == self.NO_NAME_ID for l in self.paletteEntryLabels ): writer.begintag("paletteEntryLabels") writer.newline() for index, label in enumerate(self.paletteEntryLabels): if label != self.NO_NAME_ID: writer.simpletag("label", index=index, value=label) if self.version > 0 and label and ttFont and "name" in ttFont: name = ttFont["name"].getDebugName(label) if name is not None: writer.comment(name) writer.newline() writer.endtag("paletteEntryLabels") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "palette": self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID))) self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE))) palette = [] for element in content: if isinstance(element, str): continue attrs = element[1] color = Color.fromHex(attrs["value"]) palette.append(color) self.palettes.append(palette) elif name == "paletteEntryLabels": colorLabels = {} for element in content: if isinstance(element, str): continue elementName, elementAttr, _ = element if elementName == "label": labelIndex = safeEval(elementAttr["index"]) nameID = safeEval(elementAttr["value"]) colorLabels[labelIndex] = nameID self.paletteEntryLabels = [ colorLabels.get(i, self.NO_NAME_ID) for i in range(self.numPaletteEntries) ] elif "value" in attrs: value = safeEval(attrs["value"]) setattr(self, name, value) if name == "numPaletteEntries": self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries class Color(namedtuple("Color", "blue green red alpha")): def hex(self): return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha) def __repr__(self): return self.hex() def toXML(self, writer, ttFont, index=None): writer.simpletag("color", value=self.hex(), index=index) writer.newline() @classmethod def fromHex(cls, value): if value[0] == "#": value = value[1:] red = int(value[0:2], 16) green = int(value[2:4], 16) blue = int(value[4:6], 16) alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF return cls(red=red, green=green, blue=blue, alpha=alpha) @classmethod def fromRGBA(cls, red, green, blue, alpha): return cls(red=red, green=green, blue=blue, alpha=alpha) PKaZZZA ]��"fontTools/ttLib/tables/D_S_I_G_.pyfrom fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval from fontTools.misc import sstruct from . 
import DefaultTable import base64 DSIG_HeaderFormat = """ > # big endian ulVersion: L usNumSigs: H usFlag: H """ # followed by an array of usNumSigs DSIG_Signature records DSIG_SignatureFormat = """ > # big endian ulFormat: L ulLength: L # length includes DSIG_SignatureBlock header ulOffset: L """ # followed by an array of usNumSigs DSIG_SignatureBlock records, # each followed immediately by the pkcs7 bytes DSIG_SignatureBlockFormat = """ > # big endian usReserved1: H usReserved2: H cbSignature: l # length of following raw pkcs7 data """ # # NOTE # the DSIG table format allows for SignatureBlocks residing # anywhere in the table and possibly in a different order as # listed in the array after the first table header # # this implementation does not keep track of any gaps and/or data # before or after the actual signature blocks while decompiling, # and puts them in the same physical order as listed in the header # on compilation with no padding whatsoever. # class table_D_S_I_G_(DefaultTable.DefaultTable): def decompile(self, data, ttFont): dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self) assert self.ulVersion == 1, "DSIG ulVersion must be 1" assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0" self.signatureRecords = sigrecs = [] for n in range(self.usNumSigs): sigrec, newData = sstruct.unpack2( DSIG_SignatureFormat, newData, SignatureRecord() ) assert sigrec.ulFormat == 1, ( "DSIG signature record #%d ulFormat must be 1" % n ) sigrecs.append(sigrec) for n, sigrec in enumerate(sigrecs): dummy, newData = sstruct.unpack2( DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec ) assert sigrec.usReserved1 == 0, ( "DSIG signature record #%d usReserved1 must be 0" % n ) assert sigrec.usReserved2 == 0, ( "DSIG signature record #%d usReserved2 must be 0" % n ) sigrec.pkcs7 = newData[: sigrec.cbSignature] def compile(self, ttFont): packed = sstruct.pack(DSIG_HeaderFormat, self) headers = [packed] offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat) data = [] for sigrec in self.signatureRecords: # first pack signature block sigrec.cbSignature = len(sigrec.pkcs7) packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7 data.append(packed) # update redundant length field sigrec.ulLength = len(packed) # update running table offset sigrec.ulOffset = offset headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec)) offset += sigrec.ulLength if offset % 2: # Pad to even bytes data.append(b"\0") return bytesjoin(headers + data) def toXML(self, xmlWriter, ttFont): xmlWriter.comment( "note that the Digital Signature will be invalid after recompilation!" ) xmlWriter.newline() xmlWriter.simpletag( "tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag, ) for sigrec in self.signatureRecords: xmlWriter.newline() sigrec.toXML(xmlWriter, ttFont) xmlWriter.newline() def fromXML(self, name, attrs, content, ttFont): if name == "tableHeader": self.signatureRecords = [] self.ulVersion = safeEval(attrs["version"]) self.usNumSigs = safeEval(attrs["numSigs"]) self.usFlag = safeEval(attrs["flag"]) return if name == "SignatureRecord": sigrec = SignatureRecord() sigrec.fromXML(name, attrs, content, ttFont) self.signatureRecords.append(sigrec) pem_spam = lambda l, spam={ "-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True, }: not spam.get(l.strip()) def b64encode(b): s = base64.b64encode(b) # Line-break at 76 chars.
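# (76 characters matches the MIME base64 line-length limit of RFC 2045;
# wrapping keeps the PEM-style PKCS7 block readable in the XML output.)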
items = [] while s: items.append(tostr(s[:76])) items.append("\n") s = s[76:] return strjoin(items) class SignatureRecord(object): def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.__dict__) def toXML(self, writer, ttFont): writer.begintag(self.__class__.__name__, format=self.ulFormat) writer.newline() writer.write_noindent("-----BEGIN PKCS7-----\n") writer.write_noindent(b64encode(self.pkcs7)) writer.write_noindent("-----END PKCS7-----\n") writer.endtag(self.__class__.__name__) def fromXML(self, name, attrs, content, ttFont): self.ulFormat = safeEval(attrs["format"]) self.usReserved1 = safeEval(attrs.get("reserved1", "0")) self.usReserved2 = safeEval(attrs.get("reserved2", "0")) self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content)))) PKaZZZ����"fontTools/ttLib/tables/D__e_b_g.pyimport json from . import DefaultTable class table_D__e_b_g(DefaultTable.DefaultTable): def decompile(self, data, ttFont): self.data = json.loads(data) def compile(self, ttFont): return json.dumps(self.data).encode("utf-8") def toXML(self, writer, ttFont): writer.writecdata(json.dumps(self.data, indent=2)) def fromXML(self, name, attrs, content, ttFont): self.data = json.loads(content) PKaZZZ�rW���&fontTools/ttLib/tables/DefaultTable.pyfrom fontTools.misc.textTools import Tag from fontTools.ttLib import getClassTag class DefaultTable(object): dependencies = [] def __init__(self, tag=None): if tag is None: tag = getClassTag(self.__class__) self.tableTag = Tag(tag) def decompile(self, data, ttFont): self.data = data def compile(self, ttFont): return self.data def toXML(self, writer, ttFont, **kwargs): if hasattr(self, "ERROR"): writer.comment("An error occurred during the decompilation of this table") writer.newline() writer.comment(self.ERROR) writer.newline() writer.begintag("hexdata") writer.newline() writer.dumphex(self.compile(ttFont)) writer.endtag("hexdata") writer.newline() def fromXML(self, name, attrs, content, ttFont): from fontTools.misc.textTools import readHex from fontTools import ttLib if name != "hexdata": raise ttLib.TTLibError("can't handle '%s' element" % name) self.decompile(readHex(content), ttFont) def __repr__(self): return "<'%s' table at %x>" % (self.tableTag, id(self)) def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ def __ne__(self, other): result = self.__eq__(other) return result if result is NotImplemented else not result PKaZZZDe��~~"fontTools/ttLib/tables/E_B_D_T_.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import ( bytechr, byteord, bytesjoin, strjoin, safeEval, readHex, hexStr, deHexStr, ) from .BitmapGlyphMetrics import ( BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat, ) from . import DefaultTable import itertools import os import struct import logging log = logging.getLogger(__name__) ebdtTableVersionFormat = """ > # big endian version: 16.16F """ ebdtComponentFormat = """ > # big endian glyphCode: H xOffset: b yOffset: b """ class table_E_B_D_T_(DefaultTable.DefaultTable): # Keep a reference to the name of the data locator table. locatorName = "EBLC" # This method can be overridden in subclasses to support new formats # without changing the other implementation. Also can be used as a # convenience method for converting a font file to an alternative format.
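# For instance, the CBDT table reuses this machinery by overriding only the
# lookup (a sketch of the pattern, mirroring table_C_B_D_T_ earlier in this
# package):
#
#     def getImageFormatClass(self, imageFormat):
#         try:
#             return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
#         except KeyError:
#             return cbdt_bitmap_classes[imageFormat]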
def getImageFormatClass(self, imageFormat): return ebdt_bitmap_classes[imageFormat] def decompile(self, data, ttFont): # Get the version but don't advance the slice. # Most of the lookup for this table is done relative # to the beginning so slice by the offsets provided # in the EBLC table. sstruct.unpack2(ebdtTableVersionFormat, data, self) # Keep a dict of glyphs that have been seen so they aren't remade. # This dict maps intervals of data to the BitmapGlyph. glyphDict = {} # Pull out the EBLC table and loop through glyphs. # A strike is a concept that spans both tables. # The actual bitmap data is stored in the EBDT. locator = ttFont[self.__class__.locatorName] self.strikeData = [] for curStrike in locator.strikes: bitmapGlyphDict = {} self.strikeData.append(bitmapGlyphDict) for indexSubTable in curStrike.indexSubTables: dataIter = zip(indexSubTable.names, indexSubTable.locations) for curName, curLoc in dataIter: # Don't create duplicate data entries for the same glyphs. # Instead, just reuse the structures that already exist. if curLoc in glyphDict: curGlyph = glyphDict[curLoc] else: curGlyphData = data[slice(*curLoc)] imageFormatClass = self.getImageFormatClass( indexSubTable.imageFormat ) curGlyph = imageFormatClass(curGlyphData, ttFont) glyphDict[curLoc] = curGlyph bitmapGlyphDict[curName] = curGlyph def compile(self, ttFont): dataList = [] dataList.append(sstruct.pack(ebdtTableVersionFormat, self)) dataSize = len(dataList[0]) # Keep a dict of glyphs that have been seen so they aren't remade. # This dict maps the id of the BitmapGlyph to the interval # in the data. glyphDict = {} # Go through the bitmap glyph data. Just in case the data for a glyph # changed, the size metrics should be recalculated. There are a variety # of formats and they get stored in the EBLC table. That is why # recalculation is deferred to the EblcIndexSubTable class and just # pass what is known about bitmap glyphs from this particular table. locator = ttFont[self.__class__.locatorName] for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): for curIndexSubTable in curStrike.indexSubTables: dataLocations = [] for curName in curIndexSubTable.names: # Handle the data placement based on seeing the glyph or not. # Just save a reference to the location if the glyph has already # been saved in compile. This code assumes that glyphs will only # be referenced multiple times from indexFormat5. By luck the # code may still work when referencing poorly ordered fonts with # duplicate references. If there is a font that is unlucky the # respective compile methods for the indexSubTables will fail # their assertions. All fonts seem to follow this assumption. # More complicated packing may be needed if a counter-font exists. glyph = curGlyphDict[curName] objectId = id(glyph) if objectId not in glyphDict: data = glyph.compile(ttFont) data = curIndexSubTable.padBitmapData(data) startByte = dataSize dataSize += len(data) endByte = dataSize dataList.append(data) dataLoc = (startByte, endByte) glyphDict[objectId] = dataLoc else: dataLoc = glyphDict[objectId] dataLocations.append(dataLoc) # Just use the new data locations in the indexSubTable. # The respective compile implementations will take care # of any of the problems in the conversion that may arise. curIndexSubTable.locations = dataLocations return bytesjoin(dataList) def toXML(self, writer, ttFont): # When exporting to XML if one of the data export formats # requires metrics then those metrics may be in the locator.
# In this case populate the bitmaps with "export metrics". if ttFont.bitmapGlyphDataFormat in ("row", "bitwise"): locator = ttFont[self.__class__.locatorName] for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): for curIndexSubTable in curStrike.indexSubTables: for curName in curIndexSubTable.names: glyph = curGlyphDict[curName] # I'm not sure which metrics have priority here. # For now if both metrics exist go with glyph metrics. if hasattr(glyph, "metrics"): glyph.exportMetrics = glyph.metrics else: glyph.exportMetrics = curIndexSubTable.metrics glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth writer.simpletag("header", [("version", self.version)]) writer.newline() locator = ttFont[self.__class__.locatorName] for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData): writer.begintag("strikedata", [("index", strikeIndex)]) writer.newline() for curName, curBitmap in bitmapGlyphDict.items(): curBitmap.toXML(strikeIndex, curName, writer, ttFont) writer.endtag("strikedata") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "header": self.version = safeEval(attrs["version"]) elif name == "strikedata": if not hasattr(self, "strikeData"): self.strikeData = [] strikeIndex = safeEval(attrs["index"]) bitmapGlyphDict = {} for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]): imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix) :]) glyphName = attrs["name"] imageFormatClass = self.getImageFormatClass(imageFormat) curGlyph = imageFormatClass(None, None) curGlyph.fromXML(name, attrs, content, ttFont) assert glyphName not in bitmapGlyphDict, ( "Duplicate glyphs with the same name '%s' in the same strike." % glyphName ) bitmapGlyphDict[glyphName] = curGlyph else: log.warning("%s being ignored by %s", name, self.__class__.__name__) # Grow the strike data array to the appropriate size. The XML # format allows the strike index value to be out of order. if strikeIndex >= len(self.strikeData): self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData)) assert ( self.strikeData[strikeIndex] is None ), "Duplicate strike EBDT indices." self.strikeData[strikeIndex] = bitmapGlyphDict class EbdtComponent(object): def toXML(self, writer, ttFont): writer.begintag("ebdtComponent", [("name", self.name)]) writer.newline() for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]: writer.simpletag(componentName, value=getattr(self, componentName)) writer.newline() writer.endtag("ebdtComponent") writer.newline() def fromXML(self, name, attrs, content, ttFont): self.name = attrs["name"] componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:]) for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name in componentNames: vars(self)[name] = safeEval(attrs["value"]) else: log.warning("unknown name '%s' being ignored by EbdtComponent.", name) # Helper functions for dealing with binary. 
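# --- Added example (editorial illustration, not part of the original source) ---
# A minimal sketch of how the two helpers below relate: _data2binary renders
# raw bytes as a least-significant-bit-first string of "0"/"1" characters,
# and _binary2data packs such a string back into bytes.
def _exampleBinaryRoundTrip():  # hypothetical helper, illustration only
    # 0b00000011 rendered LSB-first over 8 bits becomes "11000000".
    assert _data2binary(b"\x03", 8) == "11000000"
    # Packing the bit string again restores the original byte.
    assert _binary2data("11000000") == b"\x03"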
def _data2binary(data, numBits): binaryList = [] for curByte in data: value = byteord(curByte) numBitsCut = min(8, numBits) for i in range(numBitsCut): if value & 0x1: binaryList.append("1") else: binaryList.append("0") value = value >> 1 numBits -= numBitsCut return strjoin(binaryList) def _binary2data(binary): byteList = [] for bitLoc in range(0, len(binary), 8): byteString = binary[bitLoc : bitLoc + 8] curByte = 0 for curBit in reversed(byteString): curByte = curByte << 1 if curBit == "1": curByte |= 1 byteList.append(bytechr(curByte)) return bytesjoin(byteList) def _memoize(f): class memodict(dict): def __missing__(self, key): ret = f(key) if isinstance(key, int) or len(key) == 1: self[key] = ret return ret return memodict().__getitem__ # 00100111 -> 11100100 per byte, not to be confused with little/big endian. # Bitmap data per byte is in the order that binary is written on the page # with the least significant bit as far right as possible. This is the # opposite of what makes sense algorithmically and hence this function. @_memoize def _reverseBytes(data): r""" >>> bin(ord(_reverseBytes(0b00100111))) '0b11100100' >>> _reverseBytes(b'\x00\xf0') b'\x00\x0f' """ if isinstance(data, bytes) and len(data) != 1: return bytesjoin(map(_reverseBytes, data)) byte = byteord(data) result = 0 for i in range(8): result = result << 1 result |= byte & 1 byte = byte >> 1 return bytechr(result) # This section of code is for reading and writing image data to/from XML. def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): writer.begintag("rawimagedata") writer.newline() writer.dumphex(bitmapObject.imageData) writer.endtag("rawimagedata") writer.newline() def _readRawImageData(bitmapObject, name, attrs, content, ttFont): bitmapObject.imageData = readHex(content) def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): metrics = bitmapObject.exportMetrics del bitmapObject.exportMetrics bitDepth = bitmapObject.exportBitDepth del bitmapObject.exportBitDepth writer.begintag( "rowimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height ) writer.newline() for curRow in range(metrics.height): rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics) writer.simpletag("row", value=hexStr(rowData)) writer.newline() writer.endtag("rowimagedata") writer.newline() def _readRowImageData(bitmapObject, name, attrs, content, ttFont): bitDepth = safeEval(attrs["bitDepth"]) metrics = SmallGlyphMetrics() metrics.width = safeEval(attrs["width"]) metrics.height = safeEval(attrs["height"]) dataRows = [] for element in content: if not isinstance(element, tuple): continue name, attr, content = element # Chop off 'imagedata' from the tag to get just the option. if name == "row": dataRows.append(deHexStr(attr["value"])) bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics) def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): metrics = bitmapObject.exportMetrics del bitmapObject.exportMetrics bitDepth = bitmapObject.exportBitDepth del bitmapObject.exportBitDepth # A dict for mapping binary to more readable/artistic ASCII characters. binaryConv = {"0": ".", "1": "@"} writer.begintag( "bitwiseimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height, ) writer.newline() for curRow in range(metrics.height): rowData = bitmapObject.getRow( curRow, bitDepth=1, metrics=metrics, reverseBytes=True ) rowData = _data2binary(rowData, metrics.width) # Make the output a readable ASCII art form. 
rowData = strjoin(map(binaryConv.get, rowData)) writer.simpletag("row", value=rowData) writer.newline() writer.endtag("bitwiseimagedata") writer.newline() def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont): bitDepth = safeEval(attrs["bitDepth"]) metrics = SmallGlyphMetrics() metrics.width = safeEval(attrs["width"]) metrics.height = safeEval(attrs["height"]) # A dict for mapping from ASCII to binary. All characters are considered # a '1' except space, period and '0' which maps to '0'. binaryConv = {" ": "0", ".": "0", "0": "0"} dataRows = [] for element in content: if not isinstance(element, tuple): continue name, attr, content = element if name == "row": mapParams = zip(attr["value"], itertools.repeat("1")) rowData = strjoin(itertools.starmap(binaryConv.get, mapParams)) dataRows.append(_binary2data(rowData)) bitmapObject.setRows( dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True ) def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): try: folder = os.path.dirname(writer.file.name) except AttributeError: # fall back to current directory if output file's directory isn't found folder = "." folder = os.path.join(folder, "bitmaps") filename = glyphName + bitmapObject.fileExtension if not os.path.isdir(folder): os.makedirs(folder) folder = os.path.join(folder, "strike%d" % strikeIndex) if not os.path.isdir(folder): os.makedirs(folder) fullPath = os.path.join(folder, filename) writer.simpletag("extfileimagedata", value=fullPath) writer.newline() with open(fullPath, "wb") as file: file.write(bitmapObject.imageData) def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont): fullPath = attrs["value"] with open(fullPath, "rb") as file: bitmapObject.imageData = file.read() # End of XML writing code. # Important information about the naming scheme. Used for identifying formats # in XML. _bitmapGlyphSubclassPrefix = "ebdt_bitmap_format_" class BitmapGlyph(object): # For the external file format. This can be changed in subclasses. This way # when the extfile option is turned on files have the form: glyphName.ext # The default is just a flat binary file with no meaning. fileExtension = ".bin" # Keep track of reading and writing of various forms. xmlDataFunctions = { "raw": (_writeRawImageData, _readRawImageData), "row": (_writeRowImageData, _readRowImageData), "bitwise": (_writeBitwiseImageData, _readBitwiseImageData), "extfile": (_writeExtFileImageData, _readExtFileImageData), } def __init__(self, data, ttFont): self.data = data self.ttFont = ttFont # TODO Currently non-lazy decompilation is untested here... # if not ttFont.lazy: # self.decompile() # del self.data def __getattr__(self, attr): # Allow lazy decompile. if attr[:2] == "__": raise AttributeError(attr) if attr == "data": raise AttributeError(attr) self.decompile() del self.data return getattr(self, attr) def ensureDecompiled(self, recurse=False): if hasattr(self, "data"): self.decompile() del self.data # Not a fan of this but it is needed for safer safety checking. def getFormat(self): return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix) :]) def toXML(self, strikeIndex, glyphName, writer, ttFont): writer.begintag(self.__class__.__name__, [("name", glyphName)]) writer.newline() self.writeMetrics(writer, ttFont) # Use the internal write method to write using the correct output format. 
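# Added note (editorial, not in the original source): writeData looks up
# ttFont.bitmapGlyphDataFormat in the xmlDataFunctions table ("raw", "row",
# "bitwise" or "extfile") and falls back to the raw hexdump writer when the
# format name is unknown.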
self.writeData(strikeIndex, glyphName, writer, ttFont) writer.endtag(self.__class__.__name__) writer.newline() def fromXML(self, name, attrs, content, ttFont): self.readMetrics(name, attrs, content, ttFont) for element in content: if not isinstance(element, tuple): continue name, attr, content = element if not name.endswith("imagedata"): continue # Chop off 'imagedata' from the tag to get just the option. option = name[: -len("imagedata")] assert option in self.__class__.xmlDataFunctions self.readData(name, attr, content, ttFont) # Some of the glyph formats carry metrics. This allows for metrics to be # added if the glyph format has them. Default behavior is to do nothing. def writeMetrics(self, writer, ttFont): pass # The opposite of write metrics. def readMetrics(self, name, attrs, content, ttFont): pass def writeData(self, strikeIndex, glyphName, writer, ttFont): try: writeFunc, readFunc = self.__class__.xmlDataFunctions[ ttFont.bitmapGlyphDataFormat ] except KeyError: writeFunc = _writeRawImageData writeFunc(strikeIndex, glyphName, self, writer, ttFont) def readData(self, name, attrs, content, ttFont): # Chop off 'imagedata' from the tag to get just the option. option = name[: -len("imagedata")] writeFunc, readFunc = self.__class__.xmlDataFunctions[option] readFunc(self, name, attrs, content, ttFont) # A closure for creating a mixin for the two types of metrics handling. # Most of the code is very similar so it's easier to deal with here. # Everything works just by passing the class that the mixin is for. def _createBitmapPlusMetricsMixin(metricsClass): # Both metrics names are listed here to make meaningful error messages. metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__] curMetricsName = metricsClass.__name__ # Find which metrics this is for and determine the opposite name. metricsId = metricStrings.index(curMetricsName) oppositeMetricsName = metricStrings[1 - metricsId] class BitmapPlusMetricsMixin(object): def writeMetrics(self, writer, ttFont): self.metrics.toXML(writer, ttFont) def readMetrics(self, name, attrs, content, ttFont): for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == curMetricsName: self.metrics = metricsClass() self.metrics.fromXML(name, attrs, content, ttFont) elif name == oppositeMetricsName: log.warning( "%s being ignored in format %d.", oppositeMetricsName, self.getFormat(), ) return BitmapPlusMetricsMixin # Since there are only two types of mixins just create them here. BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics) BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics) # Data that is bit aligned can be tricky to deal with. These classes implement # helper functionality for dealing with the data and getting a particular row # of bitwise data. Also helps implement fancy data export/import in XML. class BitAlignedBitmapMixin(object): def _getBitRange(self, row, bitDepth, metrics): rowBits = bitDepth * metrics.width bitOffset = row * rowBits return (bitOffset, bitOffset + rowBits) def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): if metrics is None: metrics = self.metrics assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" # Loop through each byte. This can cover two bytes in the original data or # a single byte if things happen to be aligned. The very last entry might # not be aligned so take care to trim the binary data to size and pad with # zeros in the row data.
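# Added worked example (editorial, not in the original comment): with
# bitDepth=1 and metrics.width=10, row 1 spans bits 10..19, so the first
# output byte takes 6 bits from imageData[1] (cutPoint=2, numBitsCut=6)
# plus 2 bits from imageData[2], and a second output byte carries the
# remaining 2 bits padded out with zeros.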
Bit aligned data is somewhat tricky. # # Example of data cut. Data cut represented in x's. # '|' represents byte boundary. # data = ...0XX|XXXXXX00|000... => XXXXXXXX # or # data = ...0XX|XXXX0000|000... => XXXXXX00 # or # data = ...000|XXXXXXXX|000... => XXXXXXXX # or # data = ...000|00XXXX00|000... => XXXX0000 # dataList = [] bitRange = self._getBitRange(row, bitDepth, metrics) stepRange = bitRange + (8,) for curBit in range(*stepRange): endBit = min(curBit + 8, bitRange[1]) numBits = endBit - curBit cutPoint = curBit % 8 firstByteLoc = curBit // 8 secondByteLoc = endBit // 8 if firstByteLoc < secondByteLoc: numBitsCut = 8 - cutPoint else: numBitsCut = endBit - curBit curByte = _reverseBytes(self.imageData[firstByteLoc]) firstHalf = byteord(curByte) >> cutPoint firstHalf = ((1 << numBitsCut) - 1) & firstHalf newByte = firstHalf if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData): curByte = _reverseBytes(self.imageData[secondByteLoc]) secondHalf = byteord(curByte) << numBitsCut newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1) dataList.append(bytechr(newByte)) # The way the data is kept is opposite the algorithm used. data = bytesjoin(dataList) if not reverseBytes: data = _reverseBytes(data) return data def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False): if metrics is None: metrics = self.metrics if not reverseBytes: dataRows = list(map(_reverseBytes, dataRows)) # Keep track of a list of ordinal values as they are easier to modify # than a list of strings. Map to actual strings later. numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8 ordDataList = [0] * numBytes for row, data in enumerate(dataRows): bitRange = self._getBitRange(row, bitDepth, metrics) stepRange = bitRange + (8,) for curBit, curByte in zip(range(*stepRange), data): endBit = min(curBit + 8, bitRange[1]) cutPoint = curBit % 8 firstByteLoc = curBit // 8 secondByteLoc = endBit // 8 if firstByteLoc < secondByteLoc: numBitsCut = 8 - cutPoint else: numBitsCut = endBit - curBit curByte = byteord(curByte) firstByte = curByte & ((1 << numBitsCut) - 1) ordDataList[firstByteLoc] |= firstByte << cutPoint if firstByteLoc < secondByteLoc and secondByteLoc < numBytes: secondByte = (curByte >> numBitsCut) & ((1 << 8 - numBitsCut) - 1) ordDataList[secondByteLoc] |= secondByte # Save the image data with the bits going the correct way. 
self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList))) class ByteAlignedBitmapMixin(object): def _getByteRange(self, row, bitDepth, metrics): rowBytes = (bitDepth * metrics.width + 7) // 8 byteOffset = row * rowBytes return (byteOffset, byteOffset + rowBytes) def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): if metrics is None: metrics = self.metrics assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" byteRange = self._getByteRange(row, bitDepth, metrics) data = self.imageData[slice(*byteRange)] if reverseBytes: data = _reverseBytes(data) return data def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False): if metrics is None: metrics = self.metrics if reverseBytes: dataRows = map(_reverseBytes, dataRows) self.imageData = bytesjoin(dataRows) class ebdt_bitmap_format_1( ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph ): def decompile(self): self.metrics = SmallGlyphMetrics() dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) self.imageData = data def compile(self, ttFont): data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) return data + self.imageData class ebdt_bitmap_format_2( BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph ): def decompile(self): self.metrics = SmallGlyphMetrics() dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) self.imageData = data def compile(self, ttFont): data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) return data + self.imageData class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph): def decompile(self): self.imageData = self.data def compile(self, ttFont): return self.imageData class ebdt_bitmap_format_6( ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph ): def decompile(self): self.metrics = BigGlyphMetrics() dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) self.imageData = data def compile(self, ttFont): data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) return data + self.imageData class ebdt_bitmap_format_7( BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph ): def decompile(self): self.metrics = BigGlyphMetrics() dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) self.imageData = data def compile(self, ttFont): data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) return data + self.imageData class ComponentBitmapGlyph(BitmapGlyph): def toXML(self, strikeIndex, glyphName, writer, ttFont): writer.begintag(self.__class__.__name__, [("name", glyphName)]) writer.newline() self.writeMetrics(writer, ttFont) writer.begintag("components") writer.newline() for curComponent in self.componentArray: curComponent.toXML(writer, ttFont) writer.endtag("components") writer.newline() writer.endtag(self.__class__.__name__) writer.newline() def fromXML(self, name, attrs, content, ttFont): self.readMetrics(name, attrs, content, ttFont) for element in content: if not isinstance(element, tuple): continue name, attr, content = element if name == "components": self.componentArray = [] for compElement in content: if not isinstance(compElement, tuple): continue name, attrs, content = compElement if name == "ebdtComponent": curComponent = EbdtComponent() curComponent.fromXML(name, attrs, content, ttFont) self.componentArray.append(curComponent) else: log.warning("'%s' being ignored in component array.", name) class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph): def decompile(self): 
self.metrics = SmallGlyphMetrics() dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) data = data[1:] (numComponents,) = struct.unpack(">H", data[:2]) data = data[2:] self.componentArray = [] for i in range(numComponents): curComponent = EbdtComponent() dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) self.componentArray.append(curComponent) def compile(self, ttFont): dataList = [] dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) dataList.append(b"\0") dataList.append(struct.pack(">H", len(self.componentArray))) for curComponent in self.componentArray: curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) return bytesjoin(dataList) class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph): def decompile(self): self.metrics = BigGlyphMetrics() dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) (numComponents,) = struct.unpack(">H", data[:2]) data = data[2:] self.componentArray = [] for i in range(numComponents): curComponent = EbdtComponent() dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) self.componentArray.append(curComponent) def compile(self, ttFont): dataList = [] dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) dataList.append(struct.pack(">H", len(self.componentArray))) for curComponent in self.componentArray: curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) return bytesjoin(dataList) # Dictionary of bitmap formats to the class representing that format # currently only the ones listed in this map are the ones supported. ebdt_bitmap_classes = { 1: ebdt_bitmap_format_1, 2: ebdt_bitmap_format_2, 5: ebdt_bitmap_format_5, 6: ebdt_bitmap_format_6, 7: ebdt_bitmap_format_7, 8: ebdt_bitmap_format_8, 9: ebdt_bitmap_format_9, } PKaZZZ�M�sAtAt"fontTools/ttLib/tables/E_B_L_C_.pyfrom fontTools.misc import sstruct from . import DefaultTable from fontTools.misc.textTools import bytesjoin, safeEval from .BitmapGlyphMetrics import ( BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat, ) import struct import itertools from collections import deque import logging log = logging.getLogger(__name__) eblcHeaderFormat = """ > # big endian version: 16.16F numSizes: I """ # The table format string is split to handle sbitLineMetrics simply. bitmapSizeTableFormatPart1 = """ > # big endian indexSubTableArrayOffset: I indexTablesSize: I numberOfIndexSubTables: I colorRef: I """ # The compound type for hori and vert. sbitLineMetricsFormat = """ > # big endian ascender: b descender: b widthMax: B caretSlopeNumerator: b caretSlopeDenominator: b caretOffset: b minOriginSB: b minAdvanceSB: b maxBeforeBL: b minAfterBL: b pad1: b pad2: b """ # hori and vert go between the two parts. 
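# --- Added example (editorial illustration, not part of the original source) ---
# A minimal sketch of how these declarative format strings behave with
# fontTools.misc.sstruct: the line-metrics record above is a fixed 12 bytes
# and unpacks into plain attributes on any object.
def _exampleUnpackLineMetrics():  # hypothetical helper, illustration only
    assert sstruct.calcsize(sbitLineMetricsFormat) == 12
    class _Metrics(object):
        pass
    m = sstruct.unpack(sbitLineMetricsFormat, b"\x08\xfe" + b"\x00" * 10, _Metrics())
    # ascender is a signed byte, so 0x08 -> 8; descender 0xfe -> -2.
    assert (m.ascender, m.descender) == (8, -2)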
bitmapSizeTableFormatPart2 = """ > # big endian startGlyphIndex: H endGlyphIndex: H ppemX: B ppemY: B bitDepth: B flags: b """ indexSubTableArrayFormat = ">HHL" indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat) indexSubHeaderFormat = ">HHL" indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat) codeOffsetPairFormat = ">HH" codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat) class table_E_B_L_C_(DefaultTable.DefaultTable): dependencies = ["EBDT"] # This method can be overridden in subclasses to support new formats # without changing the other implementation. Also can be used as a # convenience method for converting a font file to an alternative format. def getIndexFormatClass(self, indexFormat): return eblc_sub_table_classes[indexFormat] def decompile(self, data, ttFont): # Save the original data because offsets are from the start of the table. origData = data i = 0 dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self) i += 8 self.strikes = [] for curStrikeIndex in range(self.numSizes): curStrike = Strike() self.strikes.append(curStrike) curTable = curStrike.bitmapSizeTable dummy = sstruct.unpack2( bitmapSizeTableFormatPart1, data[i : i + 16], curTable ) i += 16 for metric in ("hori", "vert"): metricObj = SbitLineMetrics() vars(curTable)[metric] = metricObj dummy = sstruct.unpack2( sbitLineMetricsFormat, data[i : i + 12], metricObj ) i += 12 dummy = sstruct.unpack( bitmapSizeTableFormatPart2, data[i : i + 8], curTable ) i += 8 for curStrike in self.strikes: curTable = curStrike.bitmapSizeTable for subtableIndex in range(curTable.numberOfIndexSubTables): i = ( curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize ) tup = struct.unpack( indexSubTableArrayFormat, data[i : i + indexSubTableArraySize] ) (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable tup = struct.unpack( indexSubHeaderFormat, data[i : i + indexSubHeaderSize] ) (indexFormat, imageFormat, imageDataOffset) = tup indexFormatClass = self.getIndexFormatClass(indexFormat) indexSubTable = indexFormatClass(data[i + indexSubHeaderSize :], ttFont) indexSubTable.firstGlyphIndex = firstGlyphIndex indexSubTable.lastGlyphIndex = lastGlyphIndex indexSubTable.additionalOffsetToIndexSubtable = ( additionalOffsetToIndexSubtable ) indexSubTable.indexFormat = indexFormat indexSubTable.imageFormat = imageFormat indexSubTable.imageDataOffset = imageDataOffset indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317 curStrike.indexSubTables.append(indexSubTable) def compile(self, ttFont): dataList = [] self.numSizes = len(self.strikes) dataList.append(sstruct.pack(eblcHeaderFormat, self)) # Data size of the header + bitmapSizeTable needs to be calculated # in order to form offsets. This value will hold the size of the data # in dataList after all the data is consolidated in dataList. dataSize = len(dataList[0]) # The table will be structured in the following order: # (0) header # (1) Each bitmapSizeTable [1 ... self.numSizes] # (2) Alternate between indexSubTableArray and indexSubTable # for each bitmapSizeTable present. # # The issue is maintaining the proper offsets when table information # gets moved around. All offsets and size information must be recalculated # when building the table to allow editing within ttLib and also allow easy # import/export to and from XML.
All of this offset information is lost # when exporting to XML so everything must be calculated fresh so importing # from XML will work cleanly. Only byte offset and size information is # calculated fresh. Count information like numberOfIndexSubTables is # checked through assertions. If the information in this table was not # touched or was changed properly then these types of values should match. # # The table will be rebuilt the following way: # (0) Precompute the size of all the bitmapSizeTables. This is needed to # compute the offsets properly. # (1) For each bitmapSizeTable compute the indexSubTable and # indexSubTableArray pair. The indexSubTable must be computed first # so that the offset information in indexSubTableArray can be # calculated. Update the data size after each pairing. # (2) Build each bitmapSizeTable. # (3) Consolidate all the data into the main dataList in the correct order. for _ in self.strikes: dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1) dataSize += len(("hori", "vert")) * sstruct.calcsize(sbitLineMetricsFormat) dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2) indexSubTablePairDataList = [] for curStrike in self.strikes: curTable = curStrike.bitmapSizeTable curTable.numberOfIndexSubTables = len(curStrike.indexSubTables) curTable.indexSubTableArrayOffset = dataSize # Precompute the size of the indexSubTableArray. This information # is important for correctly calculating the new value for # additionalOffsetToIndexSubtable. sizeOfSubTableArray = ( curTable.numberOfIndexSubTables * indexSubTableArraySize ) lowerBound = dataSize dataSize += sizeOfSubTableArray upperBound = dataSize indexSubTableDataList = [] for indexSubTable in curStrike.indexSubTables: indexSubTable.additionalOffsetToIndexSubtable = ( dataSize - curTable.indexSubTableArrayOffset ) glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names)) indexSubTable.firstGlyphIndex = min(glyphIds) indexSubTable.lastGlyphIndex = max(glyphIds) data = indexSubTable.compile(ttFont) indexSubTableDataList.append(data) dataSize += len(data) curTable.startGlyphIndex = min( ist.firstGlyphIndex for ist in curStrike.indexSubTables ) curTable.endGlyphIndex = max( ist.lastGlyphIndex for ist in curStrike.indexSubTables ) for i in curStrike.indexSubTables: data = struct.pack( indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable, ) indexSubTablePairDataList.append(data) indexSubTablePairDataList.extend(indexSubTableDataList) curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset for curStrike in self.strikes: curTable = curStrike.bitmapSizeTable data = sstruct.pack(bitmapSizeTableFormatPart1, curTable) dataList.append(data) for metric in ("hori", "vert"): metricObj = vars(curTable)[metric] data = sstruct.pack(sbitLineMetricsFormat, metricObj) dataList.append(data) data = sstruct.pack(bitmapSizeTableFormatPart2, curTable) dataList.append(data) dataList.extend(indexSubTablePairDataList) return bytesjoin(dataList) def toXML(self, writer, ttFont): writer.simpletag("header", [("version", self.version)]) writer.newline() for curIndex, curStrike in enumerate(self.strikes): curStrike.toXML(curIndex, writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if name == "header": self.version = safeEval(attrs["version"]) elif name == "strike": if not hasattr(self, "strikes"): self.strikes = [] strikeIndex = safeEval(attrs["index"]) curStrike = Strike() curStrike.fromXML(name, attrs, content, ttFont, self) # Grow the strike array to the appropriate size. 
The XML format # allows for the strike index value to be out of order. if strikeIndex >= len(self.strikes): self.strikes += [None] * (strikeIndex + 1 - len(self.strikes)) assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices." self.strikes[strikeIndex] = curStrike class Strike(object): def __init__(self): self.bitmapSizeTable = BitmapSizeTable() self.indexSubTables = [] def toXML(self, strikeIndex, writer, ttFont): writer.begintag("strike", [("index", strikeIndex)]) writer.newline() self.bitmapSizeTable.toXML(writer, ttFont) writer.comment( "GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler." ) writer.newline() for indexSubTable in self.indexSubTables: indexSubTable.toXML(writer, ttFont) writer.endtag("strike") writer.newline() def fromXML(self, name, attrs, content, ttFont, locator): for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "bitmapSizeTable": self.bitmapSizeTable.fromXML(name, attrs, content, ttFont) elif name.startswith(_indexSubTableSubclassPrefix): indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix) :]) indexFormatClass = locator.getIndexFormatClass(indexFormat) indexSubTable = indexFormatClass(None, None) indexSubTable.indexFormat = indexFormat indexSubTable.fromXML(name, attrs, content, ttFont) self.indexSubTables.append(indexSubTable) class BitmapSizeTable(object): # Returns all the simple metric names that bitmap size table # cares about in terms of XML creation. def _getXMLMetricNames(self): dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1] dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1] # Skip the first 3 data names because they are byte offsets and counts. return dataNames[3:] def toXML(self, writer, ttFont): writer.begintag("bitmapSizeTable") writer.newline() for metric in ("hori", "vert"): getattr(self, metric).toXML(metric, writer, ttFont) for metricName in self._getXMLMetricNames(): writer.simpletag(metricName, value=getattr(self, metricName)) writer.newline() writer.endtag("bitmapSizeTable") writer.newline() def fromXML(self, name, attrs, content, ttFont): # Create a lookup for all the simple names that make sense to # bitmap size table. Only read the information from these names. dataNames = set(self._getXMLMetricNames()) for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "sbitLineMetrics": direction = attrs["direction"] assert direction in ( "hori", "vert", ), "SbitLineMetrics direction specified invalid." metricObj = SbitLineMetrics() metricObj.fromXML(name, attrs, content, ttFont) vars(self)[direction] = metricObj elif name in dataNames: vars(self)[name] = safeEval(attrs["value"]) else: log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name) class SbitLineMetrics(object): def toXML(self, name, writer, ttFont): writer.begintag("sbitLineMetrics", [("direction", name)]) writer.newline() for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]: writer.simpletag(metricName, value=getattr(self, metricName)) writer.newline() writer.endtag("sbitLineMetrics") writer.newline() def fromXML(self, name, attrs, content, ttFont): metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1]) for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name in metricNames: vars(self)[name] = safeEval(attrs["value"]) # Important information about the naming scheme. 
Used for identifying subtables. _indexSubTableSubclassPrefix = "eblc_index_sub_table_" class EblcIndexSubTable(object): def __init__(self, data, ttFont): self.data = data self.ttFont = ttFont # TODO Currently non-lazy decompiling doesn't work for this class... # if not ttFont.lazy: # self.decompile() # del self.data, self.ttFont def __getattr__(self, attr): # Allow lazy decompile. if attr[:2] == "__": raise AttributeError(attr) if attr == "data": raise AttributeError(attr) self.decompile() return getattr(self, attr) def ensureDecompiled(self, recurse=False): if hasattr(self, "data"): self.decompile() # This method just takes care of the indexSubHeader. Implementing subclasses # should call it to compile the indexSubHeader and then continue compiling # the remainder of their unique format. def compile(self, ttFont): return struct.pack( indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset, ) # Creates the XML for bitmap glyphs. Each index sub table basically makes # the same XML except for specific metric information that is written # out via a method call that a subclass implements optionally. def toXML(self, writer, ttFont): writer.begintag( self.__class__.__name__, [ ("imageFormat", self.imageFormat), ("firstGlyphIndex", self.firstGlyphIndex), ("lastGlyphIndex", self.lastGlyphIndex), ], ) writer.newline() self.writeMetrics(writer, ttFont) # Write out the names as that's all that's needed to rebuild etc. # For font debugging of consecutive formats the ids are also written. # The ids are not read when moving from the XML format. glyphIds = map(ttFont.getGlyphID, self.names) for glyphName, glyphId in zip(self.names, glyphIds): writer.simpletag("glyphLoc", name=glyphName, id=glyphId) writer.newline() writer.endtag(self.__class__.__name__) writer.newline() def fromXML(self, name, attrs, content, ttFont): # Read all the attributes. Even though the glyph indices are # recalculated, they are still read in case there needs to # be an immediate export of the data. self.imageFormat = safeEval(attrs["imageFormat"]) self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"]) self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"]) self.readMetrics(name, attrs, content, ttFont) self.names = [] for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "glyphLoc": self.names.append(attrs["name"]) # A helper method that writes the metrics for the index sub table. It also # is responsible for writing the image size for fixed size data since fixed # size is not recalculated on compile. Default behavior is to do nothing. def writeMetrics(self, writer, ttFont): pass # A helper method that is the inverse of writeMetrics. def readMetrics(self, name, attrs, content, ttFont): pass # This method is for fixed glyph data sizes. There are formats where # the glyph data is fixed in size but actually contains composite glyphs. # To handle this the font spec in indexSubTable makes the data the size of # the fixed size by padding the component arrays. This function abstracts # out this padding process. Input is data unpadded. Output is data # padded only in fixed formats. Default behavior is to return the data. def padBitmapData(self, data): return data # Remove any of the glyph locations and names that are flagged as skipped. # This only occurs in formats {1,3}. def removeSkipGlyphs(self): # Determines if a name, location pair is a valid data location. # Skip glyphs are marked when the size is equal to zero.
def isValidLocation(args): (name, (startByte, endByte)) = args return startByte < endByte # Remove all skip glyphs. dataPairs = list(filter(isValidLocation, zip(self.names, self.locations))) self.names, self.locations = list(map(list, zip(*dataPairs))) # A closure for creating a custom mixin. This is done because formats 1 and 3 # are very similar. The only difference between them is the size per offset # value. Code put in here should handle both cases generally. def _createOffsetArrayIndexSubTableMixin(formatStringForDataType): # Prep the data size for the offset array data format. dataFormat = ">" + formatStringForDataType offsetDataSize = struct.calcsize(dataFormat) class OffsetArrayIndexSubTableMixin(object): def decompile(self): numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1 indexingOffsets = [ glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2) ] indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) offsetArray = [ struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations ] glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1)) modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray] self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) self.removeSkipGlyphs() del self.data, self.ttFont def compile(self, ttFont): # First make sure that all the data lines up properly. Formats 1 and 3 # must have all its data lined up consecutively. If not this will fail. for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): assert ( curLoc[1] == nxtLoc[0] ), "Data must be consecutive in indexSubTable offset formats" glyphIds = list(map(ttFont.getGlyphID, self.names)) # Make sure that all ids are sorted strictly increasing. assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1)) # Run a simple algorithm to add skip glyphs to the data locations at # the places where an id is not present. idQueue = deque(glyphIds) locQueue = deque(self.locations) allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1)) allLocations = [] for curId in allGlyphIds: if curId != idQueue[0]: allLocations.append((locQueue[0][0], locQueue[0][0])) else: idQueue.popleft() allLocations.append(locQueue.popleft()) # Now that all the locations are collected, pack them appropriately into # offsets. This is the form where offset[i] is the location and # offset[i+1]-offset[i] is the size of the data location. offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]] # Image data offset must be less than or equal to the minimum of locations. # This offset may change the value for round tripping but is safer and # allows imageDataOffset to not be required to be in the XML version. self.imageDataOffset = min(offsets) offsetArray = [offset - self.imageDataOffset for offset in offsets] dataList = [EblcIndexSubTable.compile(self, ttFont)] dataList += [ struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray ] # Take care of any padding issues. Only occurs in format 3. if offsetDataSize * len(offsetArray) % 4 != 0: dataList.append(struct.pack(dataFormat, 0)) return bytesjoin(dataList) return OffsetArrayIndexSubTableMixin # A Mixin for functionality shared between the different kinds # of fixed sized data handling. Both kinds have big metrics so # that kind of special processing is also handled in this mixin. 
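# Added worked example (editorial note, not part of the original source):
# in the fixed-size index formats handled by the mixin below, every glyph
# slot occupies exactly imageSize bytes, so a 5-byte compiled glyph with
# imageSize=8 gets three zero bytes from padBitmapData(), and glyph
# locations are simply consecutive imageSize-wide slices starting at
# imageDataOffset.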
class FixedSizeIndexSubTableMixin(object): def writeMetrics(self, writer, ttFont): writer.simpletag("imageSize", value=self.imageSize) writer.newline() self.metrics.toXML(writer, ttFont) def readMetrics(self, name, attrs, content, ttFont): for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "imageSize": self.imageSize = safeEval(attrs["value"]) elif name == BigGlyphMetrics.__name__: self.metrics = BigGlyphMetrics() self.metrics.fromXML(name, attrs, content, ttFont) elif name == SmallGlyphMetrics.__name__: log.warning( "SmallGlyphMetrics being ignored in format %d.", self.indexFormat ) def padBitmapData(self, data): # Make sure that the data isn't bigger than the fixed size. assert len(data) <= self.imageSize, ( "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat ) # Pad the data so that it matches the fixed size. pad = (self.imageSize - len(data)) * b"\0" return data + pad class eblc_index_sub_table_1( _createOffsetArrayIndexSubTableMixin("L"), EblcIndexSubTable ): pass class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable): def decompile(self): (self.imageSize,) = struct.unpack(">L", self.data[:4]) self.metrics = BigGlyphMetrics() sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics) glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1)) offsets = [ self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1) ] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) del self.data, self.ttFont def compile(self, ttFont): glyphIds = list(map(ttFont.getGlyphID, self.names)) # Make sure all the ids are consecutive. This is required by Format 2. assert glyphIds == list( range(self.firstGlyphIndex, self.lastGlyphIndex + 1) ), "Format 2 ids must be consecutive." self.imageDataOffset = min(next(iter(zip(*self.locations)))) dataList = [EblcIndexSubTable.compile(self, ttFont)] dataList.append(struct.pack(">L", self.imageSize)) dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) return bytesjoin(dataList) class eblc_index_sub_table_3( _createOffsetArrayIndexSubTableMixin("H"), EblcIndexSubTable ): pass class eblc_index_sub_table_4(EblcIndexSubTable): def decompile(self): (numGlyphs,) = struct.unpack(">L", self.data[:4]) data = self.data[4:] indexingOffsets = [ glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs + 2) ] indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) glyphArray = [ struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations ] glyphIds, offsets = list(map(list, zip(*glyphArray))) # There are one too many glyph ids. Get rid of the last one. glyphIds.pop() offsets = [offset + self.imageDataOffset for offset in offsets] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) del self.data, self.ttFont def compile(self, ttFont): # First make sure that all the data lines up properly. Format 4 # must have all its data lined up consecutively. If not this will fail. for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): assert ( curLoc[1] == nxtLoc[0] ), "Data must be consecutive in indexSubTable format 4" offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]] # Image data offset must be less than or equal to the minimum of locations. 
# Resetting this offset may change the value for round tripping but is safer # and allows imageDataOffset to not be required to be in the XML version. self.imageDataOffset = min(offsets) offsets = [offset - self.imageDataOffset for offset in offsets] glyphIds = list(map(ttFont.getGlyphID, self.names)) # Create an iterator over the ids plus a padding value. idsPlusPad = list(itertools.chain(glyphIds, [0])) dataList = [EblcIndexSubTable.compile(self, ttFont)] dataList.append(struct.pack(">L", len(glyphIds))) tmp = [ struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets) ] dataList += tmp data = bytesjoin(dataList) return data class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable): def decompile(self): self.origDataLen = 0 (self.imageSize,) = struct.unpack(">L", self.data[:4]) data = self.data[4:] self.metrics, data = sstruct.unpack2( bigGlyphMetricsFormat, data, BigGlyphMetrics() ) (numGlyphs,) = struct.unpack(">L", data[:4]) data = data[4:] glyphIds = [ struct.unpack(">H", data[2 * i : 2 * (i + 1)])[0] for i in range(numGlyphs) ] offsets = [ self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1) ] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) del self.data, self.ttFont def compile(self, ttFont): self.imageDataOffset = min(next(iter(zip(*self.locations)))) dataList = [EblcIndexSubTable.compile(self, ttFont)] dataList.append(struct.pack(">L", self.imageSize)) dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) glyphIds = list(map(ttFont.getGlyphID, self.names)) dataList.append(struct.pack(">L", len(glyphIds))) dataList += [struct.pack(">H", curId) for curId in glyphIds] if len(glyphIds) % 2 == 1: dataList.append(struct.pack(">H", 0)) return bytesjoin(dataList) # Dictionary of indexFormat to the class representing that format. eblc_sub_table_classes = { 1: eblc_index_sub_table_1, 2: eblc_index_sub_table_2, 3: eblc_index_sub_table_3, 4: eblc_index_sub_table_4, 5: eblc_index_sub_table_5, } PKaZZZ�p�mJJ"fontTools/ttLib/tables/F_F_T_M_.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from fontTools.misc.timeTools import timestampFromString, timestampToString from . import DefaultTable FFTMFormat = """ > # big endian version: I FFTimeStamp: Q sourceCreated: Q sourceModified: Q """ class table_F_F_T_M_(DefaultTable.DefaultTable): def decompile(self, data, ttFont): dummy, rest = sstruct.unpack2(FFTMFormat, data, self) def compile(self, ttFont): data = sstruct.pack(FFTMFormat, self) return data def toXML(self, writer, ttFont): writer.comment( "FontForge's timestamp, font source creation and modification dates" ) writer.newline() formatstring, names, fixes = sstruct.getformat(FFTMFormat) for name in names: value = getattr(self, name) if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): value = timestampToString(value) writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): value = timestampFromString(value) else: value = safeEval(value) setattr(self, name, value) PKaZZZ� "fontTools/ttLib/tables/F__e_a_t.pyfrom fontTools.misc import sstruct from fontTools.misc.fixedTools import floatToFixedToStr from fontTools.misc.textTools import safeEval from . import DefaultTable from . 
import grUtils import struct Feat_hdr_format = """ > version: 16.16F """ class table_F__e_a_t(DefaultTable.DefaultTable): """The ``Feat`` table is used exclusively by the Graphite shaping engine to store features and possible settings specified in GDL. Graphite features determine what rules are applied to transform a glyph stream. Not to be confused with ``feat``, or the OpenType Layout tables ``GSUB``/``GPOS``.""" def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.features = {} def decompile(self, data, ttFont): (_, data) = sstruct.unpack2(Feat_hdr_format, data, self) self.version = float(floatToFixedToStr(self.version, precisionBits=16)) (numFeats,) = struct.unpack(">H", data[:2]) data = data[8:] allfeats = [] maxsetting = 0 for i in range(numFeats): if self.version >= 2.0: (fid, nums, _, offset, flags, lid) = struct.unpack( ">LHHLHH", data[16 * i : 16 * (i + 1)] ) offset = int((offset - 12 - 16 * numFeats) / 4) else: (fid, nums, offset, flags, lid) = struct.unpack( ">HHLHH", data[12 * i : 12 * (i + 1)] ) offset = int((offset - 12 - 12 * numFeats) / 4) allfeats.append((fid, nums, offset, flags, lid)) maxsetting = max(maxsetting, offset + nums) data = data[16 * numFeats :] allsettings = [] for i in range(maxsetting): if len(data) >= 4 * (i + 1): (val, lid) = struct.unpack(">HH", data[4 * i : 4 * (i + 1)]) allsettings.append((val, lid)) for i, f in enumerate(allfeats): (fid, nums, offset, flags, lid) = f fobj = Feature() fobj.flags = flags fobj.label = lid self.features[grUtils.num2tag(fid)] = fobj fobj.settings = {} fobj.default = None fobj.index = i for i in range(offset, offset + nums): if i >= len(allsettings): continue (vid, vlid) = allsettings[i] fobj.settings[vid] = vlid if fobj.default is None: fobj.default = vid def compile(self, ttFont): fdat = b"" vdat = b"" offset = 0 for f, v in sorted(self.features.items(), key=lambda x: x[1].index): fnum = grUtils.tag2num(f) if self.version >= 2.0: fdat += struct.pack( ">LHHLHH", grUtils.tag2num(f), len(v.settings), 0, offset * 4 + 12 + 16 * len(self.features), v.flags, v.label, ) elif fnum > 65535: # self healing for alphabetic ids self.version = 2.0 return self.compile(ttFont) else: fdat += struct.pack( ">HHLHH", grUtils.tag2num(f), len(v.settings), offset * 4 + 12 + 12 * len(self.features), v.flags, v.label, ) for s, l in sorted( v.settings.items(), key=lambda x: (-1, x[1]) if x[0] == v.default else x ): vdat += struct.pack(">HH", s, l) offset += len(v.settings) hdr = sstruct.pack(Feat_hdr_format, self) return hdr + struct.pack(">HHL", len(self.features), 0, 0) + fdat + vdat def toXML(self, writer, ttFont): writer.simpletag("version", version=self.version) writer.newline() for f, v in sorted(self.features.items(), key=lambda x: x[1].index): writer.begintag( "feature", fid=f, label=v.label, flags=v.flags, default=(v.default if v.default else 0), ) writer.newline() for s, l in sorted(v.settings.items()): writer.simpletag("setting", value=s, label=l) writer.newline() writer.endtag("feature") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "version": self.version = float(safeEval(attrs["version"])) elif name == "feature": fid = attrs["fid"] fobj = Feature() fobj.flags = int(safeEval(attrs["flags"])) fobj.label = int(safeEval(attrs["label"])) fobj.default = int(safeEval(attrs.get("default", "0"))) fobj.index = len(self.features) self.features[fid] = fobj fobj.settings = {} for element in content: if not isinstance(element, tuple): continue tag, a, c = element if tag == "setting": 
fobj.settings[int(safeEval(a["value"]))] = int(safeEval(a["label"])) class Feature(object): pass PKaZZZ!���XX"fontTools/ttLib/tables/G_D_E_F_.pyfrom .otBase import BaseTTXConverter class table_G_D_E_F_(BaseTTXConverter): pass PKaZZZ����"fontTools/ttLib/tables/G_M_A_P_.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import tobytes, tostr, safeEval from . import DefaultTable GMAPFormat = """ > # big endian tableVersionMajor: H tableVersionMinor: H flags: H recordsCount: H recordsOffset: H fontNameLength: H """ # psFontName is a byte string which follows the record above. This is zero padded # to the beginning of the records array. The recordsOffset is 32 bit aligned. GMAPRecordFormat1 = """ > # big endian UV: L cid: H gid: H ggid: H name: 32s """ class GMAPRecord(object): def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""): self.UV = uv self.cid = cid self.gid = gid self.ggid = ggid self.name = name def toXML(self, writer, ttFont): writer.begintag("GMAPRecord") writer.newline() writer.simpletag("UV", value=self.UV) writer.newline() writer.simpletag("cid", value=self.cid) writer.newline() writer.simpletag("gid", value=self.gid) writer.newline() writer.simpletag("glyphletGid", value=self.ggid) writer.newline() writer.simpletag("GlyphletName", value=self.name) writer.newline() writer.endtag("GMAPRecord") writer.newline() def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name == "GlyphletName": self.name = value elif name == "glyphletGid": self.ggid = safeEval(value) else: setattr(self, name, safeEval(value)) def compile(self, ttFont): if self.UV is None: self.UV = 0 nameLen = len(self.name) if nameLen < 32: self.name = self.name + "\0" * (32 - nameLen) data = sstruct.pack(GMAPRecordFormat1, self) return data def __repr__(self): return ( "GMAPRecord[ UV: " + str(self.UV) + ", cid: " + str(self.cid) + ", gid: " + str(self.gid) + ", ggid: " + str(self.ggid) + ", Glyphlet Name: " + str(self.name) + " ]" ) class table_G_M_A_P_(DefaultTable.DefaultTable): dependencies = [] def decompile(self, data, ttFont): dummy, newData = sstruct.unpack2(GMAPFormat, data, self) self.psFontName = tostr(newData[: self.fontNameLength]) assert ( self.recordsOffset % 4 ) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
newData = data[self.recordsOffset :] self.gmapRecords = [] for i in range(self.recordsCount): gmapRecord, newData = sstruct.unpack2( GMAPRecordFormat1, newData, GMAPRecord() ) gmapRecord.name = gmapRecord.name.strip("\0") self.gmapRecords.append(gmapRecord) def compile(self, ttFont): self.recordsCount = len(self.gmapRecords) self.fontNameLength = len(self.psFontName) self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4) data = sstruct.pack(GMAPFormat, self) data = data + tobytes(self.psFontName) data = data + b"\0" * (self.recordsOffset - len(data)) for record in self.gmapRecords: data = data + record.compile(ttFont) return data def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") writer.newline() formatstring, names, fixes = sstruct.getformat(GMAPFormat) for name in names: value = getattr(self, name) writer.simpletag(name, value=value) writer.newline() writer.simpletag("PSFontName", value=self.psFontName) writer.newline() for gmapRecord in self.gmapRecords: gmapRecord.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if name == "GMAPRecord": if not hasattr(self, "gmapRecords"): self.gmapRecords = [] gmapRecord = GMAPRecord() self.gmapRecords.append(gmapRecord) for element in content: if isinstance(element, str): continue name, attrs, content = element gmapRecord.fromXML(name, attrs, content, ttFont) else: value = attrs["value"] if name == "PSFontName": self.psFontName = value else: setattr(self, name, safeEval(value)) PKaZZZ��YY"fontTools/ttLib/tables/G_P_K_G_.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import bytesjoin, safeEval, readHex from . import DefaultTable import sys import array GPKGFormat = """ > # big endian version: H flags: H numGMAPs: H numGlyplets: H """ # Two offset arrays follow the header: numGMAPs + 1 offsets to the GMAP data # blocks, then numGlyplets + 1 offsets to the glyphlet data blocks; all offsets # are 32 bit values measured from the start of the table.
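# --- Added example (editorial illustration, not part of the original source) ---
# A minimal sketch of the offset-array layout used by this table: each array
# stores count + 1 offsets so that block i occupies data[offsets[i]:offsets[i+1]].
# The header size and single-array layout here are simplifying assumptions.
def _exampleOffsetArray(blocks=(b"abc", b"de"), headerSize=8):  # hypothetical helper
    offsets = [headerSize + (len(blocks) + 1) * 4]  # data begins after header + array
    for block in blocks:
        offsets.append(offsets[-1] + len(block))
    # For two blocks of 3 and 2 bytes after an 8-byte header: [20, 23, 25].
    return offsets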
class table_G_P_K_G_(DefaultTable.DefaultTable): def decompile(self, data, ttFont): dummy, newData = sstruct.unpack2(GPKGFormat, data, self) GMAPoffsets = array.array("I") endPos = (self.numGMAPs + 1) * 4 GMAPoffsets.frombytes(newData[:endPos]) if sys.byteorder != "big": GMAPoffsets.byteswap() self.GMAPs = [] for i in range(self.numGMAPs): start = GMAPoffsets[i] end = GMAPoffsets[i + 1] self.GMAPs.append(data[start:end]) pos = endPos endPos = pos + (self.numGlyplets + 1) * 4 glyphletOffsets = array.array("I") glyphletOffsets.frombytes(newData[pos:endPos]) if sys.byteorder != "big": glyphletOffsets.byteswap() self.glyphlets = [] for i in range(self.numGlyplets): start = glyphletOffsets[i] end = glyphletOffsets[i + 1] self.glyphlets.append(data[start:end]) def compile(self, ttFont): self.numGMAPs = len(self.GMAPs) self.numGlyplets = len(self.glyphlets) GMAPoffsets = [0] * (self.numGMAPs + 1) glyphletOffsets = [0] * (self.numGlyplets + 1) dataList = [sstruct.pack(GPKGFormat, self)] pos = len(dataList[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4 GMAPoffsets[0] = pos for i in range(1, self.numGMAPs + 1): pos += len(self.GMAPs[i - 1]) GMAPoffsets[i] = pos gmapArray = array.array("I", GMAPoffsets) if sys.byteorder != "big": gmapArray.byteswap() dataList.append(gmapArray.tobytes()) glyphletOffsets[0] = pos for i in range(1, self.numGlyplets + 1): pos += len(self.glyphlets[i - 1]) glyphletOffsets[i] = pos glyphletArray = array.array("I", glyphletOffsets) if sys.byteorder != "big": glyphletArray.byteswap() dataList.append(glyphletArray.tobytes()) dataList += self.GMAPs dataList += self.glyphlets data = bytesjoin(dataList) return data def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") writer.newline() formatstring, names, fixes = sstruct.getformat(GPKGFormat) for name in names: value = getattr(self, name) writer.simpletag(name, value=value) writer.newline() writer.begintag("GMAPs") writer.newline() for gmapData in self.GMAPs: writer.begintag("hexdata") writer.newline() writer.dumphex(gmapData) writer.endtag("hexdata") writer.newline() writer.endtag("GMAPs") writer.newline() writer.begintag("glyphlets") writer.newline() for glyphletData in self.glyphlets: writer.begintag("hexdata") writer.newline() writer.dumphex(glyphletData) writer.endtag("hexdata") writer.newline() writer.endtag("glyphlets") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "GMAPs": if not hasattr(self, "GMAPs"): self.GMAPs = [] for element in content: if isinstance(element, str): continue itemName, itemAttrs, itemContent = element if itemName == "hexdata": self.GMAPs.append(readHex(itemContent)) elif name == "glyphlets": if not hasattr(self, "glyphlets"): self.glyphlets = [] for element in content: if isinstance(element, str): continue itemName, itemAttrs, itemContent = element if itemName == "hexdata": self.glyphlets.append(readHex(itemContent)) else: setattr(self, name, safeEval(attrs["value"])) PKaZZZr���XX"fontTools/ttLib/tables/G_P_O_S_.pyfrom .otBase import BaseTTXConverter class table_G_P_O_S_(BaseTTXConverter): pass PKaZZZ'ahXX"fontTools/ttLib/tables/G_S_U_B_.pyfrom .otBase import BaseTTXConverter class table_G_S_U_B_(BaseTTXConverter): pass PKaZZZp��x!x!"fontTools/ttLib/tables/G__l_a_t.pyfrom fontTools.misc import sstruct from fontTools.misc.fixedTools import floatToFixedToStr from fontTools.misc.textTools import safeEval # from itertools import * from functools import partial from . import DefaultTable from . 
import grUtils import struct Glat_format_0 = """ > # big endian version: 16.16F """ Glat_format_3 = """ > version: 16.16F compression:L # compression scheme or reserved """ Glat_format_1_entry = """ > attNum: B # Attribute number of first attribute num: B # Number of attributes in this run """ Glat_format_23_entry = """ > attNum: H # Attribute number of first attribute num: H # Number of attributes in this run """ Glat_format_3_octabox_metrics = """ > subboxBitmap: H # Which subboxes exist on 4x4 grid diagNegMin: B # Defines minimum negatively-sloped diagonal (si) diagNegMax: B # Defines maximum negatively-sloped diagonal (sa) diagPosMin: B # Defines minimum positively-sloped diagonal (di) diagPosMax: B # Defines maximum positively-sloped diagonal (da) """ Glat_format_3_subbox_entry = """ > left: B # xi right: B # xa bottom: B # yi top: B # ya diagNegMin: B # Defines minimum negatively-sloped diagonal (si) diagNegMax: B # Defines maximum negatively-sloped diagonal (sa) diagPosMin: B # Defines minimum positively-sloped diagonal (di) diagPosMax: B # Defines maximum positively-sloped diagonal (da) """ class _Object: pass class _Dict(dict): pass class table_G__l_a_t(DefaultTable.DefaultTable): """ Support Graphite Glat tables """ def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.scheme = 0 def decompile(self, data, ttFont): sstruct.unpack2(Glat_format_0, data, self) self.version = float(floatToFixedToStr(self.version, precisionBits=16)) if self.version <= 1.9: decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry) elif self.version <= 2.9: decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry) elif self.version >= 3.0: (data, self.scheme) = grUtils.decompress(data) sstruct.unpack2(Glat_format_3, data, self) self.hasOctaboxes = (self.compression & 1) == 1 decoder = self.decompileAttributes3 gloc = ttFont["Gloc"] self.attributes = {} count = 0 for s, e in zip(gloc, gloc[1:]): self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e]) count += 1 def decompileAttributes12(self, data, fmt): attributes = _Dict() while len(data) > 3: e, data = sstruct.unpack2(fmt, data, _Object()) keys = range(e.attNum, e.attNum + e.num) if len(data) >= 2 * e.num: vals = struct.unpack_from((">%dh" % e.num), data) attributes.update(zip(keys, vals)) data = data[2 * e.num :] return attributes def decompileAttributes3(self, data): if self.hasOctaboxes: o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object()) numsub = bin(o.subboxBitmap).count("1") o.subboxes = [] for b in range(numsub): if len(data) >= 8: subbox, data = sstruct.unpack2( Glat_format_3_subbox_entry, data, _Object() ) o.subboxes.append(subbox) attrs = self.decompileAttributes12(data, Glat_format_23_entry) if self.hasOctaboxes: attrs.octabox = o return attrs def compile(self, ttFont): data = sstruct.pack(Glat_format_0, self) if self.version <= 1.9: encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry) elif self.version <= 2.9: encoder = partial(self.compileAttributes12, fmt=Glat_format_23_entry) elif self.version >= 3.0: self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0) data = sstruct.pack(Glat_format_3, self) encoder = self.compileAttributes3 glocs = [] for n in range(len(self.attributes)): glocs.append(len(data)) data += encoder(self.attributes[ttFont.getGlyphName(n)]) glocs.append(len(data)) ttFont["Gloc"].set(glocs) if self.version >= 3.0: data = grUtils.compress(self.scheme, data) return data def compileAttributes12(self, attrs,
fmt): data = b"" for e in grUtils.entries(attrs): data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack( (">%dh" % len(e[2])), *e[2] ) return data def compileAttributes3(self, attrs): if self.hasOctaboxes: o = attrs.octabox data = sstruct.pack(Glat_format_3_octabox_metrics, o) numsub = bin(o.subboxBitmap).count("1") for b in range(numsub): data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b]) else: data = "" return data + self.compileAttributes12(attrs, Glat_format_23_entry) def toXML(self, writer, ttFont): writer.simpletag("version", version=self.version, compressionScheme=self.scheme) writer.newline() for n, a in sorted( self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0]) ): writer.begintag("glyph", name=n) writer.newline() if hasattr(a, "octabox"): o = a.octabox formatstring, names, fixes = sstruct.getformat( Glat_format_3_octabox_metrics ) vals = {} for k in names: if k == "subboxBitmap": continue vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255) vals["bitmap"] = "{:0X}".format(o.subboxBitmap) writer.begintag("octaboxes", **vals) writer.newline() formatstring, names, fixes = sstruct.getformat( Glat_format_3_subbox_entry ) for s in o.subboxes: vals = {} for k in names: vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255) writer.simpletag("octabox", **vals) writer.newline() writer.endtag("octaboxes") writer.newline() for k, v in sorted(a.items()): writer.simpletag("attribute", index=k, value=v) writer.newline() writer.endtag("glyph") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "version": self.version = float(safeEval(attrs["version"])) self.scheme = int(safeEval(attrs["compressionScheme"])) if name != "glyph": return if not hasattr(self, "attributes"): self.attributes = {} gname = attrs["name"] attributes = _Dict() for element in content: if not isinstance(element, tuple): continue tag, attrs, subcontent = element if tag == "attribute": k = int(safeEval(attrs["index"])) v = int(safeEval(attrs["value"])) attributes[k] = v elif tag == "octaboxes": self.hasOctaboxes = True o = _Object() o.subboxBitmap = int(attrs["bitmap"], 16) o.subboxes = [] del attrs["bitmap"] for k, v in attrs.items(): setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5)) for element in subcontent: if not isinstance(element, tuple): continue (tag, attrs, subcontent) = element so = _Object() for k, v in attrs.items(): setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5)) o.subboxes.append(so) attributes.octabox = o self.attributes[gname] = attributes PKaZZZ��M& & "fontTools/ttLib/tables/G__l_o_c.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from . 
import DefaultTable import array import sys Gloc_header = """ > # big endian version: 16.16F # Table version flags: H # bit 0: 1=long format, 0=short format # bit 1: 1=attribute names, 0=no names numAttribs: H # Number of attributes """ class table_G__l_o_c(DefaultTable.DefaultTable): """ Support Graphite Gloc tables """ dependencies = ["Glat"] def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.attribIds = None self.numAttribs = 0 def decompile(self, data, ttFont): _, data = sstruct.unpack2(Gloc_header, data, self) flags = self.flags del self.flags self.locations = array.array("I" if flags & 1 else "H") self.locations.frombytes(data[: len(data) - self.numAttribs * (flags & 2)]) if sys.byteorder != "big": self.locations.byteswap() self.attribIds = array.array("H") if flags & 2: self.attribIds.frombytes(data[-self.numAttribs * 2 :]) if sys.byteorder != "big": self.attribIds.byteswap() def compile(self, ttFont): data = sstruct.pack( Gloc_header, dict( version=1.0, flags=(bool(self.attribIds) << 1) + (self.locations.typecode == "I"), numAttribs=self.numAttribs, ), ) if sys.byteorder != "big": self.locations.byteswap() data += self.locations.tobytes() if sys.byteorder != "big": self.locations.byteswap() if self.attribIds: if sys.byteorder != "big": self.attribIds.byteswap() data += self.attribIds.tobytes() if sys.byteorder != "big": self.attribIds.byteswap() return data def set(self, locations): long_format = max(locations) >= 65536 self.locations = array.array("I" if long_format else "H", locations) def toXML(self, writer, ttFont): writer.simpletag("attributes", number=self.numAttribs) writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "attributes": self.numAttribs = int(safeEval(attrs["number"])) def __getitem__(self, index): return self.locations[index] def __len__(self): return len(self.locations) def __iter__(self): return iter(self.locations)
fontTools/ttLib/tables/H_V_A_R_.py
from .otBase import BaseTTXConverter class table_H_V_A_R_(BaseTTXConverter): pass
fontTools/ttLib/tables/J_S_T_F_.py
from .otBase import BaseTTXConverter class table_J_S_T_F_(BaseTTXConverter): pass
fontTools/ttLib/tables/L_T_S_H_.py
from fontTools.misc.textTools import safeEval from . import DefaultTable import struct import array # XXX I've lowered the strictness, to make sure Apple's own Chicago # XXX gets through. They're looking into it, I hope to raise the standards # XXX back to normal eventually. class table_L_T_S_H_(DefaultTable.DefaultTable): def decompile(self, data, ttFont): version, numGlyphs = struct.unpack(">HH", data[:4]) data = data[4:] assert version == 0, "unknown version: %s" % version assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length" # ouch: the assertion is not true in Chicago! # assert numGlyphs == ttFont['maxp'].numGlyphs yPels = array.array("B") yPels.frombytes(data) self.yPels = {} for i in range(numGlyphs): self.yPels[ttFont.getGlyphName(i)] = yPels[i] def compile(self, ttFont): version = 0 names = list(self.yPels.keys()) numGlyphs = len(names) yPels = [0] * numGlyphs # ouch: the assertion is not true in Chicago!
# assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs for name in names: yPels[ttFont.getGlyphID(name)] = self.yPels[name] yPels = array.array("B", yPels) return struct.pack(">HH", version, numGlyphs) + yPels.tobytes() def toXML(self, writer, ttFont): names = sorted(self.yPels.keys()) for name in names: writer.simpletag("yPel", name=name, value=self.yPels[name]) writer.newline() def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "yPels"): self.yPels = {} if name != "yPel": return # ignore unknown tags self.yPels[attrs["name"]] = safeEval(attrs["value"])
fontTools/ttLib/tables/M_A_T_H_.py
from .otBase import BaseTTXConverter class table_M_A_T_H_(BaseTTXConverter): pass
fontTools/ttLib/tables/M_E_T_A_.py
from fontTools.misc import sstruct from fontTools.misc.textTools import byteord, safeEval from . import DefaultTable import pdb import struct METAHeaderFormat = """ > # big endian tableVersionMajor: H tableVersionMinor: H metaEntriesVersionMajor: H metaEntriesVersionMinor: H unicodeVersion: L metaFlags: H nMetaRecs: H """ # This record is followed by nMetaRecs of METAGlyphRecordFormat. # This in turn is followed by as many METAStringRecordFormat entries # as specified by the METAGlyphRecordFormat entries # this is followed by the strings specified in the METAStringRecordFormat METAGlyphRecordFormat = """ > # big endian glyphID: H nMetaEntry: H """ # This record is followed by a variable data length field: # USHORT or ULONG hdrOffset # Offset from start of META table to the beginning # of this glyph's array of ns Metadata string entries. # Size determined by metaFlags field # METAGlyphRecordFormat entries must be sorted by glyph ID METAStringRecordFormat = """ > # big endian labelID: H stringLen: H """ # This record is followed by a variable data length field: # USHORT or ULONG stringOffset # METAStringRecordFormat entries must be sorted in order of labelID # There may be more than one entry with the same labelID # There may be more than one string with the same content. # Strings shall be Unicode UTF-8 encoded, and null-terminated.
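# A minimal reading sketch for the META support below (assumes a hypothetical
# "Example.ttf" that actually contains a 'META' table):
#
#     from fontTools.ttLib import TTFont
#     font = TTFont("Example.ttf")
#     meta = font["META"]
#     for glyphRec in meta.glyphRecords:
#         for stringRec in glyphRec.stringRecs:
#             print(glyphRec.glyphID, getLabelString(stringRec.labelID),
#                   stringRec.string)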
METALabelDict = { 0: "MojikumiX4051", # An integer in the range 1-20 1: "UNIUnifiedBaseChars", 2: "BaseFontName", 3: "Language", 4: "CreationDate", 5: "FoundryName", 6: "FoundryCopyright", 7: "OwnerURI", 8: "WritingScript", 10: "StrokeCount", 11: "IndexingRadical", } def getLabelString(labelID): try: label = METALabelDict[labelID] except KeyError: label = "Unknown label" return str(label) class table_M_E_T_A_(DefaultTable.DefaultTable): dependencies = [] def decompile(self, data, ttFont): dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self) self.glyphRecords = [] for i in range(self.nMetaRecs): glyphRecord, newData = sstruct.unpack2( METAGlyphRecordFormat, newData, GlyphRecord() ) if self.metaFlags == 0: [glyphRecord.offset] = struct.unpack(">H", newData[:2]) newData = newData[2:] elif self.metaFlags == 1: [glyphRecord.offset] = struct.unpack(">L", newData[:4]) newData = newData[4:] else: assert 0, ( "The metaFlags field in the META table header has a value other than 0 or 1 :" + str(self.metaFlags) ) glyphRecord.stringRecs = [] newData = data[glyphRecord.offset :] for j in range(glyphRecord.nMetaEntry): stringRec, newData = sstruct.unpack2( METAStringRecordFormat, newData, StringRecord() ) if self.metaFlags == 0: [stringRec.offset] = struct.unpack(">H", newData[:2]) newData = newData[2:] else: [stringRec.offset] = struct.unpack(">L", newData[:4]) newData = newData[4:] stringRec.string = data[ stringRec.offset : stringRec.offset + stringRec.stringLen ] glyphRecord.stringRecs.append(stringRec) self.glyphRecords.append(glyphRecord) def compile(self, ttFont): offsetOK = 0 self.nMetaRecs = len(self.glyphRecords) count = 0 while offsetOK != 1: count = count + 1 if count > 4: pdb.set_trace() metaData = sstruct.pack(METAHeaderFormat, self) stringRecsOffset = len(metaData) + self.nMetaRecs * ( 6 + 2 * (self.metaFlags & 1) ) stringRecSize = 6 + 2 * (self.metaFlags & 1) for glyphRec in self.glyphRecords: glyphRec.offset = stringRecsOffset if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0): self.metaFlags = self.metaFlags + 1 offsetOK = -1 break metaData = metaData + glyphRec.compile(self) stringRecsOffset = stringRecsOffset + ( glyphRec.nMetaEntry * stringRecSize ) # this will be the String Record offset for the next GlyphRecord. if offsetOK == -1: offsetOK = 0 continue # metaData now contains the header and all of the GlyphRecords. Its length should be # the offset to the first StringRecord. stringOffset = stringRecsOffset for glyphRec in self.glyphRecords: assert glyphRec.offset == len( metaData ), "Glyph record offset did not compile correctly! for rec:" + str( glyphRec ) for stringRec in glyphRec.stringRecs: stringRec.offset = stringOffset if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0): self.metaFlags = self.metaFlags + 1 offsetOK = -1 break metaData = metaData + stringRec.compile(self) stringOffset = stringOffset + stringRec.stringLen if offsetOK == -1: offsetOK = 0 continue if ((self.metaFlags & 1) == 1) and (stringOffset < 65536): self.metaFlags = self.metaFlags - 1 continue else: offsetOK = 1 # metaData now contains the header and all of the GlyphRecords and all of the String Records. # Its length should be the offset to the first string datum. for glyphRec in self.glyphRecords: for stringRec in glyphRec.stringRecs: assert stringRec.offset == len( metaData ), "String offset did not compile correctly!
for string:" + str( stringRec.string ) metaData = metaData + stringRec.string return metaData def toXML(self, writer, ttFont): writer.comment( "Lengths and number of entries in this table will be recalculated by the compiler" ) writer.newline() formatstring, names, fixes = sstruct.getformat(METAHeaderFormat) for name in names: value = getattr(self, name) writer.simpletag(name, value=value) writer.newline() for glyphRec in self.glyphRecords: glyphRec.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if name == "GlyphRecord": if not hasattr(self, "glyphRecords"): self.glyphRecords = [] glyphRec = GlyphRecord() self.glyphRecords.append(glyphRec) for element in content: if isinstance(element, str): continue name, attrs, content = element glyphRec.fromXML(name, attrs, content, ttFont) glyphRec.offset = -1 glyphRec.nMetaEntry = len(glyphRec.stringRecs) else: setattr(self, name, safeEval(attrs["value"])) class GlyphRecord(object): def __init__(self): self.glyphID = -1 self.nMetaEntry = -1 self.offset = -1 self.stringRecs = [] def toXML(self, writer, ttFont): writer.begintag("GlyphRecord") writer.newline() writer.simpletag("glyphID", value=self.glyphID) writer.newline() writer.simpletag("nMetaEntry", value=self.nMetaEntry) writer.newline() for stringRec in self.stringRecs: stringRec.toXML(writer, ttFont) writer.endtag("GlyphRecord") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "StringRecord": stringRec = StringRecord() self.stringRecs.append(stringRec) for element in content: if isinstance(element, str): continue stringRec.fromXML(name, attrs, content, ttFont) stringRec.stringLen = len(stringRec.string) else: setattr(self, name, safeEval(attrs["value"])) def compile(self, parentTable): data = sstruct.pack(METAGlyphRecordFormat, self) if parentTable.metaFlags == 0: datum = struct.pack(">H", self.offset) elif parentTable.metaFlags == 1: datum = struct.pack(">L", self.offset) data = data + datum return data def __repr__(self): return ( "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]" ) # XXX The following two functions are really broken around UTF-8 vs Unicode def mapXMLToUTF8(string): uString = str() strLen = len(string) i = 0 while i < strLen: prefixLen = 0 if string[i : i + 3] == "&#x": prefixLen = 3 elif string[i : i + 7] == "&amp;#x": prefixLen = 7 if prefixLen: i = i + prefixLen j = i while string[i] != ";": i = i + 1 valStr = string[j:i] uString = uString + chr(eval("0x" + valStr)) else: uString = uString + chr(byteord(string[i])) i = i + 1 return uString.encode("utf_8") def mapUTF8toXML(string): uString = string.decode("utf_8") string = "" for uChar in uString: i = ord(uChar) if (i < 0x80) and (i > 0x1F): string = string + uChar else: string = string + "&#x" + hex(i)[2:] + ";" return string class StringRecord(object): def toXML(self, writer, ttFont): writer.begintag("StringRecord") writer.newline() writer.simpletag("labelID", value=self.labelID) writer.comment(getLabelString(self.labelID)) writer.newline() writer.newline() writer.simpletag("string", value=mapUTF8toXML(self.string)) writer.newline() writer.endtag("StringRecord") writer.newline() def fromXML(self, name, attrs, content, ttFont): for element in content: if isinstance(element, str): continue name, attrs, content = element value = attrs["value"] if name == "string": self.string = mapXMLToUTF8(value) else: setattr(self, name, safeEval(value)) def compile(self, parentTable): data = 
sstruct.pack(METAStringRecordFormat, self) if parentTable.metaFlags == 0: datum = struct.pack(">H", self.offset) elif parentTable.metaFlags == 1: datum = struct.pack(">L", self.offset) data = data + datum return data def __repr__(self): return ( "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) + ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " + self.string + " ]" ) PKaZZZ"s-�XX"fontTools/ttLib/tables/M_V_A_R_.pyfrom .otBase import BaseTTXConverter class table_M_V_A_R_(BaseTTXConverter): pass PKaZZZ?J6ahlhl"fontTools/ttLib/tables/O_S_2f_2.pyfrom fontTools.misc import sstruct from fontTools.misc.roundTools import otRound from fontTools.misc.textTools import safeEval, num2binary, binary2num from fontTools.ttLib.tables import DefaultTable import bisect import logging log = logging.getLogger(__name__) # panose classification panoseFormat = """ bFamilyType: B bSerifStyle: B bWeight: B bProportion: B bContrast: B bStrokeVariation: B bArmStyle: B bLetterForm: B bMidline: B bXHeight: B """ class Panose(object): def __init__(self, **kwargs): _, names, _ = sstruct.getformat(panoseFormat) for name in names: setattr(self, name, kwargs.pop(name, 0)) for k in kwargs: raise TypeError(f"Panose() got an unexpected keyword argument {k!r}") def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(panoseFormat) for name in names: writer.simpletag(name, value=getattr(self, name)) writer.newline() def fromXML(self, name, attrs, content, ttFont): setattr(self, name, safeEval(attrs["value"])) # 'sfnt' OS/2 and Windows Metrics table - 'OS/2' OS2_format_0 = """ > # big endian version: H # version xAvgCharWidth: h # average character width usWeightClass: H # degree of thickness of strokes usWidthClass: H # aspect ratio fsType: H # type flags ySubscriptXSize: h # subscript horizontal font size ySubscriptYSize: h # subscript vertical font size ySubscriptXOffset: h # subscript x offset ySubscriptYOffset: h # subscript y offset ySuperscriptXSize: h # superscript horizontal font size ySuperscriptYSize: h # superscript vertical font size ySuperscriptXOffset: h # superscript x offset ySuperscriptYOffset: h # superscript y offset yStrikeoutSize: h # strikeout size yStrikeoutPosition: h # strikeout position sFamilyClass: h # font family class and subclass panose: 10s # panose classification number ulUnicodeRange1: L # character range ulUnicodeRange2: L # character range ulUnicodeRange3: L # character range ulUnicodeRange4: L # character range achVendID: 4s # font vendor identification fsSelection: H # font selection flags usFirstCharIndex: H # first unicode character index usLastCharIndex: H # last unicode character index sTypoAscender: h # typographic ascender sTypoDescender: h # typographic descender sTypoLineGap: h # typographic line gap usWinAscent: H # Windows ascender usWinDescent: H # Windows descender """ OS2_format_1_addition = """ ulCodePageRange1: L ulCodePageRange2: L """ OS2_format_2_addition = ( OS2_format_1_addition + """ sxHeight: h sCapHeight: h usDefaultChar: H usBreakChar: H usMaxContext: H """ ) OS2_format_5_addition = ( OS2_format_2_addition + """ usLowerOpticalPointSize: H usUpperOpticalPointSize: H """ ) bigendian = " > # big endian\n" OS2_format_1 = OS2_format_0 + OS2_format_1_addition OS2_format_2 = OS2_format_0 + OS2_format_2_addition OS2_format_5 = OS2_format_0 + OS2_format_5_addition OS2_format_1_addition = bigendian + OS2_format_1_addition OS2_format_2_addition = bigendian + OS2_format_2_addition 
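# The cumulative OS2_format_1/2/5 strings are used whole when packing, while
# each *_addition string is re-prefixed with the big-endian header so that
# decompile() can unpack just the per-version delta with sstruct.unpack2 after
# the format-0 fields have been read. Sketch of that sequencing (illustrative,
# mirroring decompile() below):
#
#     dummy, data = sstruct.unpack2(OS2_format_0, data, self)
#     if self.version == 1:
#         dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)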
OS2_format_5_addition = bigendian + OS2_format_5_addition class table_O_S_2f_2(DefaultTable.DefaultTable): """the OS/2 table""" dependencies = ["head"] def decompile(self, data, ttFont): dummy, data = sstruct.unpack2(OS2_format_0, data, self) if self.version == 1: dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self) elif self.version in (2, 3, 4): dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self) elif self.version == 5: dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self) self.usLowerOpticalPointSize /= 20 self.usUpperOpticalPointSize /= 20 elif self.version != 0: from fontTools import ttLib raise ttLib.TTLibError( "unknown format for OS/2 table: version %s" % self.version ) if len(data): log.warning("too much 'OS/2' table data") self.panose = sstruct.unpack(panoseFormat, self.panose, Panose()) def compile(self, ttFont): self.updateFirstAndLastCharIndex(ttFont) panose = self.panose head = ttFont["head"] if (self.fsSelection & 1) and not (head.macStyle & 1 << 1): log.warning( "fsSelection bit 0 (italic) and " "head table macStyle bit 1 (italic) should match" ) if (self.fsSelection & 1 << 5) and not (head.macStyle & 1): log.warning( "fsSelection bit 5 (bold) and " "head table macStyle bit 0 (bold) should match" ) if (self.fsSelection & 1 << 6) and (self.fsSelection & 1 + (1 << 5)): log.warning( "fsSelection bit 6 (regular) is set, " "bits 0 (italic) and 5 (bold) must be clear" ) if self.version < 4 and self.fsSelection & 0b1110000000: log.warning( "fsSelection bits 7, 8 and 9 are only defined in " "OS/2 table version 4 and up: version %s", self.version, ) self.panose = sstruct.pack(panoseFormat, self.panose) if self.version == 0: data = sstruct.pack(OS2_format_0, self) elif self.version == 1: data = sstruct.pack(OS2_format_1, self) elif self.version in (2, 3, 4): data = sstruct.pack(OS2_format_2, self) elif self.version == 5: d = self.__dict__.copy() d["usLowerOpticalPointSize"] = round(self.usLowerOpticalPointSize * 20) d["usUpperOpticalPointSize"] = round(self.usUpperOpticalPointSize * 20) data = sstruct.pack(OS2_format_5, d) else: from fontTools import ttLib raise ttLib.TTLibError( "unknown format for OS/2 table: version %s" % self.version ) self.panose = panose return data def toXML(self, writer, ttFont): writer.comment( "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n" "will be recalculated by the compiler" ) writer.newline() if self.version == 1: format = OS2_format_1 elif self.version in (2, 3, 4): format = OS2_format_2 elif self.version == 5: format = OS2_format_5 else: format = OS2_format_0 formatstring, names, fixes = sstruct.getformat(format) for name in names: value = getattr(self, name) if name == "panose": writer.begintag("panose") writer.newline() value.toXML(writer, ttFont) writer.endtag("panose") elif name in ( "ulUnicodeRange1", "ulUnicodeRange2", "ulUnicodeRange3", "ulUnicodeRange4", "ulCodePageRange1", "ulCodePageRange2", ): writer.simpletag(name, value=num2binary(value)) elif name in ("fsType", "fsSelection"): writer.simpletag(name, value=num2binary(value, 16)) elif name == "achVendID": writer.simpletag(name, value=repr(value)[1:-1]) else: writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "panose": self.panose = panose = Panose() for element in content: if isinstance(element, tuple): name, attrs, content = element panose.fromXML(name, attrs, content, ttFont) elif name in ( "ulUnicodeRange1", "ulUnicodeRange2", "ulUnicodeRange3", "ulUnicodeRange4", 
"ulCodePageRange1", "ulCodePageRange2", "fsType", "fsSelection", ): setattr(self, name, binary2num(attrs["value"])) elif name == "achVendID": setattr(self, name, safeEval("'''" + attrs["value"] + "'''")) else: setattr(self, name, safeEval(attrs["value"])) def updateFirstAndLastCharIndex(self, ttFont): if "cmap" not in ttFont: return codes = set() for table in getattr(ttFont["cmap"], "tables", []): if table.isUnicode(): codes.update(table.cmap.keys()) if codes: minCode = min(codes) maxCode = max(codes) # USHORT cannot hold codepoints greater than 0xFFFF self.usFirstCharIndex = min(0xFFFF, minCode) self.usLastCharIndex = min(0xFFFF, maxCode) # misspelled attributes kept for legacy reasons @property def usMaxContex(self): return self.usMaxContext @usMaxContex.setter def usMaxContex(self, value): self.usMaxContext = value @property def fsFirstCharIndex(self): return self.usFirstCharIndex @fsFirstCharIndex.setter def fsFirstCharIndex(self, value): self.usFirstCharIndex = value @property def fsLastCharIndex(self): return self.usLastCharIndex @fsLastCharIndex.setter def fsLastCharIndex(self, value): self.usLastCharIndex = value def getUnicodeRanges(self): """Return the set of 'ulUnicodeRange*' bits currently enabled.""" bits = set() ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2 ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4 for i in range(32): if ul1 & (1 << i): bits.add(i) if ul2 & (1 << i): bits.add(i + 32) if ul3 & (1 << i): bits.add(i + 64) if ul4 & (1 << i): bits.add(i + 96) return bits def setUnicodeRanges(self, bits): """Set the 'ulUnicodeRange*' fields to the specified 'bits'.""" ul1, ul2, ul3, ul4 = 0, 0, 0, 0 for bit in bits: if 0 <= bit < 32: ul1 |= 1 << bit elif 32 <= bit < 64: ul2 |= 1 << (bit - 32) elif 64 <= bit < 96: ul3 |= 1 << (bit - 64) elif 96 <= bit < 123: ul4 |= 1 << (bit - 96) else: raise ValueError("expected 0 <= int <= 122, found: %r" % bit) self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2 self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4 def recalcUnicodeRanges(self, ttFont, pruneOnly=False): """Intersect the codepoints in the font's Unicode cmap subtables with the Unicode block ranges defined in the OpenType specification (v1.7), and set the respective 'ulUnicodeRange*' bits if there is at least ONE intersection. If 'pruneOnly' is True, only clear unused bits with NO intersection. 
""" unicodes = set() for table in ttFont["cmap"].tables: if table.isUnicode(): unicodes.update(table.cmap.keys()) if pruneOnly: empty = intersectUnicodeRanges(unicodes, inverse=True) bits = self.getUnicodeRanges() - empty else: bits = intersectUnicodeRanges(unicodes) self.setUnicodeRanges(bits) return bits def getCodePageRanges(self): """Return the set of 'ulCodePageRange*' bits currently enabled.""" bits = set() if self.version < 1: return bits ul1, ul2 = self.ulCodePageRange1, self.ulCodePageRange2 for i in range(32): if ul1 & (1 << i): bits.add(i) if ul2 & (1 << i): bits.add(i + 32) return bits def setCodePageRanges(self, bits): """Set the 'ulCodePageRange*' fields to the specified 'bits'.""" ul1, ul2 = 0, 0 for bit in bits: if 0 <= bit < 32: ul1 |= 1 << bit elif 32 <= bit < 64: ul2 |= 1 << (bit - 32) else: raise ValueError(f"expected 0 <= int <= 63, found: {bit:r}") if self.version < 1: self.version = 1 self.ulCodePageRange1, self.ulCodePageRange2 = ul1, ul2 def recalcCodePageRanges(self, ttFont, pruneOnly=False): unicodes = set() for table in ttFont["cmap"].tables: if table.isUnicode(): unicodes.update(table.cmap.keys()) bits = calcCodePageRanges(unicodes) if pruneOnly: bits &= self.getCodePageRanges() # when no codepage ranges can be enabled, fall back to enabling bit 0 # (Latin 1) so that the font works in MS Word: # https://github.com/googlei18n/fontmake/issues/468 if not bits: bits = {0} self.setCodePageRanges(bits) return bits def recalcAvgCharWidth(self, ttFont): """Recalculate xAvgCharWidth using metrics from ttFont's 'hmtx' table. Set it to 0 if the unlikely event 'hmtx' table is not found. """ avg_width = 0 hmtx = ttFont.get("hmtx") if hmtx is not None: widths = [width for width, _ in hmtx.metrics.values() if width > 0] if widths: avg_width = otRound(sum(widths) / len(widths)) self.xAvgCharWidth = avg_width return avg_width # Unicode ranges data from the OpenType OS/2 table specification v1.7 OS2_UNICODE_RANGES = ( (("Basic Latin", (0x0000, 0x007F)),), (("Latin-1 Supplement", (0x0080, 0x00FF)),), (("Latin Extended-A", (0x0100, 0x017F)),), (("Latin Extended-B", (0x0180, 0x024F)),), ( ("IPA Extensions", (0x0250, 0x02AF)), ("Phonetic Extensions", (0x1D00, 0x1D7F)), ("Phonetic Extensions Supplement", (0x1D80, 0x1DBF)), ), ( ("Spacing Modifier Letters", (0x02B0, 0x02FF)), ("Modifier Tone Letters", (0xA700, 0xA71F)), ), ( ("Combining Diacritical Marks", (0x0300, 0x036F)), ("Combining Diacritical Marks Supplement", (0x1DC0, 0x1DFF)), ), (("Greek and Coptic", (0x0370, 0x03FF)),), (("Coptic", (0x2C80, 0x2CFF)),), ( ("Cyrillic", (0x0400, 0x04FF)), ("Cyrillic Supplement", (0x0500, 0x052F)), ("Cyrillic Extended-A", (0x2DE0, 0x2DFF)), ("Cyrillic Extended-B", (0xA640, 0xA69F)), ), (("Armenian", (0x0530, 0x058F)),), (("Hebrew", (0x0590, 0x05FF)),), (("Vai", (0xA500, 0xA63F)),), (("Arabic", (0x0600, 0x06FF)), ("Arabic Supplement", (0x0750, 0x077F))), (("NKo", (0x07C0, 0x07FF)),), (("Devanagari", (0x0900, 0x097F)),), (("Bengali", (0x0980, 0x09FF)),), (("Gurmukhi", (0x0A00, 0x0A7F)),), (("Gujarati", (0x0A80, 0x0AFF)),), (("Oriya", (0x0B00, 0x0B7F)),), (("Tamil", (0x0B80, 0x0BFF)),), (("Telugu", (0x0C00, 0x0C7F)),), (("Kannada", (0x0C80, 0x0CFF)),), (("Malayalam", (0x0D00, 0x0D7F)),), (("Thai", (0x0E00, 0x0E7F)),), (("Lao", (0x0E80, 0x0EFF)),), (("Georgian", (0x10A0, 0x10FF)), ("Georgian Supplement", (0x2D00, 0x2D2F))), (("Balinese", (0x1B00, 0x1B7F)),), (("Hangul Jamo", (0x1100, 0x11FF)),), ( ("Latin Extended Additional", (0x1E00, 0x1EFF)), ("Latin Extended-C", (0x2C60, 0x2C7F)), ("Latin 
Extended-D", (0xA720, 0xA7FF)), ), (("Greek Extended", (0x1F00, 0x1FFF)),), ( ("General Punctuation", (0x2000, 0x206F)), ("Supplemental Punctuation", (0x2E00, 0x2E7F)), ), (("Superscripts And Subscripts", (0x2070, 0x209F)),), (("Currency Symbols", (0x20A0, 0x20CF)),), (("Combining Diacritical Marks For Symbols", (0x20D0, 0x20FF)),), (("Letterlike Symbols", (0x2100, 0x214F)),), (("Number Forms", (0x2150, 0x218F)),), ( ("Arrows", (0x2190, 0x21FF)), ("Supplemental Arrows-A", (0x27F0, 0x27FF)), ("Supplemental Arrows-B", (0x2900, 0x297F)), ("Miscellaneous Symbols and Arrows", (0x2B00, 0x2BFF)), ), ( ("Mathematical Operators", (0x2200, 0x22FF)), ("Supplemental Mathematical Operators", (0x2A00, 0x2AFF)), ("Miscellaneous Mathematical Symbols-A", (0x27C0, 0x27EF)), ("Miscellaneous Mathematical Symbols-B", (0x2980, 0x29FF)), ), (("Miscellaneous Technical", (0x2300, 0x23FF)),), (("Control Pictures", (0x2400, 0x243F)),), (("Optical Character Recognition", (0x2440, 0x245F)),), (("Enclosed Alphanumerics", (0x2460, 0x24FF)),), (("Box Drawing", (0x2500, 0x257F)),), (("Block Elements", (0x2580, 0x259F)),), (("Geometric Shapes", (0x25A0, 0x25FF)),), (("Miscellaneous Symbols", (0x2600, 0x26FF)),), (("Dingbats", (0x2700, 0x27BF)),), (("CJK Symbols And Punctuation", (0x3000, 0x303F)),), (("Hiragana", (0x3040, 0x309F)),), ( ("Katakana", (0x30A0, 0x30FF)), ("Katakana Phonetic Extensions", (0x31F0, 0x31FF)), ), (("Bopomofo", (0x3100, 0x312F)), ("Bopomofo Extended", (0x31A0, 0x31BF))), (("Hangul Compatibility Jamo", (0x3130, 0x318F)),), (("Phags-pa", (0xA840, 0xA87F)),), (("Enclosed CJK Letters And Months", (0x3200, 0x32FF)),), (("CJK Compatibility", (0x3300, 0x33FF)),), (("Hangul Syllables", (0xAC00, 0xD7AF)),), (("Non-Plane 0 *", (0xD800, 0xDFFF)),), (("Phoenician", (0x10900, 0x1091F)),), ( ("CJK Unified Ideographs", (0x4E00, 0x9FFF)), ("CJK Radicals Supplement", (0x2E80, 0x2EFF)), ("Kangxi Radicals", (0x2F00, 0x2FDF)), ("Ideographic Description Characters", (0x2FF0, 0x2FFF)), ("CJK Unified Ideographs Extension A", (0x3400, 0x4DBF)), ("CJK Unified Ideographs Extension B", (0x20000, 0x2A6DF)), ("Kanbun", (0x3190, 0x319F)), ), (("Private Use Area (plane 0)", (0xE000, 0xF8FF)),), ( ("CJK Strokes", (0x31C0, 0x31EF)), ("CJK Compatibility Ideographs", (0xF900, 0xFAFF)), ("CJK Compatibility Ideographs Supplement", (0x2F800, 0x2FA1F)), ), (("Alphabetic Presentation Forms", (0xFB00, 0xFB4F)),), (("Arabic Presentation Forms-A", (0xFB50, 0xFDFF)),), (("Combining Half Marks", (0xFE20, 0xFE2F)),), ( ("Vertical Forms", (0xFE10, 0xFE1F)), ("CJK Compatibility Forms", (0xFE30, 0xFE4F)), ), (("Small Form Variants", (0xFE50, 0xFE6F)),), (("Arabic Presentation Forms-B", (0xFE70, 0xFEFF)),), (("Halfwidth And Fullwidth Forms", (0xFF00, 0xFFEF)),), (("Specials", (0xFFF0, 0xFFFF)),), (("Tibetan", (0x0F00, 0x0FFF)),), (("Syriac", (0x0700, 0x074F)),), (("Thaana", (0x0780, 0x07BF)),), (("Sinhala", (0x0D80, 0x0DFF)),), (("Myanmar", (0x1000, 0x109F)),), ( ("Ethiopic", (0x1200, 0x137F)), ("Ethiopic Supplement", (0x1380, 0x139F)), ("Ethiopic Extended", (0x2D80, 0x2DDF)), ), (("Cherokee", (0x13A0, 0x13FF)),), (("Unified Canadian Aboriginal Syllabics", (0x1400, 0x167F)),), (("Ogham", (0x1680, 0x169F)),), (("Runic", (0x16A0, 0x16FF)),), (("Khmer", (0x1780, 0x17FF)), ("Khmer Symbols", (0x19E0, 0x19FF))), (("Mongolian", (0x1800, 0x18AF)),), (("Braille Patterns", (0x2800, 0x28FF)),), (("Yi Syllables", (0xA000, 0xA48F)), ("Yi Radicals", (0xA490, 0xA4CF))), ( ("Tagalog", (0x1700, 0x171F)), ("Hanunoo", (0x1720, 0x173F)), ("Buhid", (0x1740, 0x175F)), 
("Tagbanwa", (0x1760, 0x177F)), ), (("Old Italic", (0x10300, 0x1032F)),), (("Gothic", (0x10330, 0x1034F)),), (("Deseret", (0x10400, 0x1044F)),), ( ("Byzantine Musical Symbols", (0x1D000, 0x1D0FF)), ("Musical Symbols", (0x1D100, 0x1D1FF)), ("Ancient Greek Musical Notation", (0x1D200, 0x1D24F)), ), (("Mathematical Alphanumeric Symbols", (0x1D400, 0x1D7FF)),), ( ("Private Use (plane 15)", (0xF0000, 0xFFFFD)), ("Private Use (plane 16)", (0x100000, 0x10FFFD)), ), ( ("Variation Selectors", (0xFE00, 0xFE0F)), ("Variation Selectors Supplement", (0xE0100, 0xE01EF)), ), (("Tags", (0xE0000, 0xE007F)),), (("Limbu", (0x1900, 0x194F)),), (("Tai Le", (0x1950, 0x197F)),), (("New Tai Lue", (0x1980, 0x19DF)),), (("Buginese", (0x1A00, 0x1A1F)),), (("Glagolitic", (0x2C00, 0x2C5F)),), (("Tifinagh", (0x2D30, 0x2D7F)),), (("Yijing Hexagram Symbols", (0x4DC0, 0x4DFF)),), (("Syloti Nagri", (0xA800, 0xA82F)),), ( ("Linear B Syllabary", (0x10000, 0x1007F)), ("Linear B Ideograms", (0x10080, 0x100FF)), ("Aegean Numbers", (0x10100, 0x1013F)), ), (("Ancient Greek Numbers", (0x10140, 0x1018F)),), (("Ugaritic", (0x10380, 0x1039F)),), (("Old Persian", (0x103A0, 0x103DF)),), (("Shavian", (0x10450, 0x1047F)),), (("Osmanya", (0x10480, 0x104AF)),), (("Cypriot Syllabary", (0x10800, 0x1083F)),), (("Kharoshthi", (0x10A00, 0x10A5F)),), (("Tai Xuan Jing Symbols", (0x1D300, 0x1D35F)),), ( ("Cuneiform", (0x12000, 0x123FF)), ("Cuneiform Numbers and Punctuation", (0x12400, 0x1247F)), ), (("Counting Rod Numerals", (0x1D360, 0x1D37F)),), (("Sundanese", (0x1B80, 0x1BBF)),), (("Lepcha", (0x1C00, 0x1C4F)),), (("Ol Chiki", (0x1C50, 0x1C7F)),), (("Saurashtra", (0xA880, 0xA8DF)),), (("Kayah Li", (0xA900, 0xA92F)),), (("Rejang", (0xA930, 0xA95F)),), (("Cham", (0xAA00, 0xAA5F)),), (("Ancient Symbols", (0x10190, 0x101CF)),), (("Phaistos Disc", (0x101D0, 0x101FF)),), ( ("Carian", (0x102A0, 0x102DF)), ("Lycian", (0x10280, 0x1029F)), ("Lydian", (0x10920, 0x1093F)), ), (("Domino Tiles", (0x1F030, 0x1F09F)), ("Mahjong Tiles", (0x1F000, 0x1F02F))), ) _unicodeStarts = [] _unicodeValues = [None] def _getUnicodeRanges(): # build the ranges of codepoints for each unicode range bit, and cache result if not _unicodeStarts: unicodeRanges = [ (start, (stop, bit)) for bit, blocks in enumerate(OS2_UNICODE_RANGES) for _, (start, stop) in blocks ] for start, (stop, bit) in sorted(unicodeRanges): _unicodeStarts.append(start) _unicodeValues.append((stop, bit)) return _unicodeStarts, _unicodeValues def intersectUnicodeRanges(unicodes, inverse=False): """Intersect a sequence of (int) Unicode codepoints with the Unicode block ranges defined in the OpenType specification v1.7, and return the set of 'ulUnicodeRanges' bits for which there is at least ONE intersection. If 'inverse' is True, return the the bits for which there is NO intersection. >>> intersectUnicodeRanges([0x0410]) == {9} True >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122} True >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == ( ... 
set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122}) True """ unicodes = set(unicodes) unicodestarts, unicodevalues = _getUnicodeRanges() bits = set() for code in unicodes: stop, bit = unicodevalues[bisect.bisect(unicodestarts, code)] if code <= stop: bits.add(bit) # The spec says that bit 57 ("Non Plane 0") implies that there's # at least one codepoint beyond the BMP; so I also include all # the non-BMP codepoints here if any(0x10000 <= code < 0x110000 for code in unicodes): bits.add(57) return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits def calcCodePageRanges(unicodes): """Given a set of Unicode codepoints (integers), calculate the corresponding OS/2 CodePage range bits. This is a direct translation of FontForge implementation: https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158 """ bits = set() hasAscii = set(range(0x20, 0x7E)).issubset(unicodes) hasLineart = ord("┤") in unicodes for uni in unicodes: if uni == ord("Þ") and hasAscii: bits.add(0) # Latin 1 elif uni == ord("Ľ") and hasAscii: bits.add(1) # Latin 2: Eastern Europe if hasLineart: bits.add(58) # Latin 2 elif uni == ord("Б"): bits.add(2) # Cyrillic if ord("Ѕ") in unicodes and hasLineart: bits.add(57) # IBM Cyrillic if ord("╜") in unicodes and hasLineart: bits.add(49) # MS-DOS Russian elif uni == ord("Ά"): bits.add(3) # Greek if hasLineart and ord("½") in unicodes: bits.add(48) # IBM Greek if hasLineart and ord("√") in unicodes: bits.add(60) # Greek, former 437 G elif uni == ord("İ") and hasAscii: bits.add(4) # Turkish if hasLineart: bits.add(56) # IBM turkish elif uni == ord("א"): bits.add(5) # Hebrew if hasLineart and ord("√") in unicodes: bits.add(53) # Hebrew elif uni == ord("ر"): bits.add(6) # Arabic if ord("√") in unicodes: bits.add(51) # Arabic if hasLineart: bits.add(61) # Arabic; ASMO 708 elif uni == ord("ŗ") and hasAscii: bits.add(7) # Windows Baltic if hasLineart: bits.add(59) # MS-DOS Baltic elif uni == ord("₫") and hasAscii: bits.add(8) # Vietnamese elif uni == ord("ๅ"): bits.add(16) # Thai elif uni == ord("エ"): bits.add(17) # JIS/Japan elif uni == ord("ㄅ"): bits.add(18) # Chinese: Simplified elif uni == ord("ㄱ"): bits.add(19) # Korean wansung elif uni == ord("央"): bits.add(20) # Chinese: Traditional elif uni == ord("곴"): bits.add(21) # Korean Johab elif uni == ord("♥") and hasAscii: bits.add(30) # OEM Character Set # TODO: Symbol bit has a special meaning (check the spec), we need # to confirm if this is wanted by default. # elif chr(0xF000) <= char <= chr(0xF0FF): # codepageRanges.add(31) # Symbol Character Set elif uni == ord("þ") and hasAscii and hasLineart: bits.add(54) # MS-DOS Icelandic elif uni == ord("╚") and hasAscii: bits.add(62) # WE/Latin 1 bits.add(63) # US elif hasAscii and hasLineart and ord("√") in unicodes: if uni == ord("Å"): bits.add(50) # MS-DOS Nordic elif uni == ord("é"): bits.add(52) # MS-DOS Canadian French elif uni == ord("õ"): bits.add(55) # MS-DOS Portuguese if hasAscii and ord("‰") in unicodes and ord("∑") in unicodes: bits.add(29) # Macintosh Character Set (US Roman) return bits if __name__ == "__main__": import doctest, sys sys.exit(doctest.testmod().failed) PKaZZZ�b��( ( "fontTools/ttLib/tables/S_I_N_G_.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval from . 
import DefaultTable SINGFormat = """ > # big endian tableVersionMajor: H tableVersionMinor: H glyphletVersion: H permissions: h mainGID: H unitsPerEm: H vertAdvance: h vertOrigin: h uniqueName: 28s METAMD5: 16s nameLength: 1s """ # baseGlyphName is a byte string which follows the record above. class table_S_I_N_G_(DefaultTable.DefaultTable): dependencies = [] def decompile(self, data, ttFont): dummy, rest = sstruct.unpack2(SINGFormat, data, self) self.uniqueName = self.decompileUniqueName(self.uniqueName) self.nameLength = byteord(self.nameLength) assert len(rest) == self.nameLength self.baseGlyphName = tostr(rest) rawMETAMD5 = self.METAMD5 self.METAMD5 = "[" + hex(byteord(self.METAMD5[0])) for char in rawMETAMD5[1:]: self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char)) self.METAMD5 = self.METAMD5 + "]" def decompileUniqueName(self, data): name = "" for char in data: val = byteord(char) if val == 0: break if (val > 31) and (val < 128): name += chr(val) else: octString = oct(val)[2:] # strip the "0o" prefix if len(octString) < 3: octString = octString.zfill(3) name += "\\" + octString return name def compile(self, ttFont): d = self.__dict__.copy() d["nameLength"] = bytechr(len(self.baseGlyphName)) d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28) METAMD5List = eval(self.METAMD5) d["METAMD5"] = b"" for val in METAMD5List: d["METAMD5"] += bytechr(val) assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table" data = sstruct.pack(SINGFormat, d) data = data + tobytes(self.baseGlyphName) return data def compilecompileUniqueName(self, name, length): nameLen = len(name) if length <= nameLen: name = name[: length - 1] + "\000" else: name += (length - nameLen) * "\000" return name def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") writer.newline() formatstring, names, fixes = sstruct.getformat(SINGFormat) for name in names: value = getattr(self, name) writer.simpletag(name, value=value) writer.newline() writer.simpletag("baseGlyphName", value=self.baseGlyphName) writer.newline() def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name in ["uniqueName", "METAMD5", "baseGlyphName"]: setattr(self, name, value) else: setattr(self, name, safeEval(value))
fontTools/ttLib/tables/S_T_A_T_.py
from .otBase import BaseTTXConverter class table_S_T_A_T_(BaseTTXConverter): pass
fontTools/ttLib/tables/S_V_G_.py
"""Compiles/decompiles SVG table. https://docs.microsoft.com/en-us/typography/opentype/spec/svg The XML format is: .. code-block:: xml <SVG> <svgDoc endGlyphID="1" startGlyphID="1"> <![CDATA[ <complete SVG doc> ]] </svgDoc> ... <svgDoc endGlyphID="n" startGlyphID="m"> <![CDATA[ <complete SVG doc> ]] </svgDoc> </SVG> """ from fontTools.misc.textTools import bytesjoin, safeEval, strjoin, tobytes, tostr from fontTools.misc import sstruct from .
import DefaultTable from collections.abc import Sequence from dataclasses import dataclass, astuple from io import BytesIO import struct import logging log = logging.getLogger(__name__) SVG_format_0 = """ > # big endian version: H offsetToSVGDocIndex: L reserved: L """ SVG_format_0Size = sstruct.calcsize(SVG_format_0) doc_index_entry_format_0 = """ > # big endian startGlyphID: H endGlyphID: H svgDocOffset: L svgDocLength: L """ doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0) class table_S_V_G_(DefaultTable.DefaultTable): def decompile(self, data, ttFont): self.docList = [] # Version 0 is the standardized version of the table; and current. # https://www.microsoft.com/typography/otspec/svg.htm sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self) if self.version != 0: log.warning( "Unknown SVG table version '%s'. Decompiling as version 0.", self.version, ) # read in SVG Documents Index # data starts with the first entry of the entry list. pos = subTableStart = self.offsetToSVGDocIndex self.numEntries = struct.unpack(">H", data[pos : pos + 2])[0] pos += 2 if self.numEntries > 0: data2 = data[pos:] entries = [] for i in range(self.numEntries): record_data = data2[ i * doc_index_entry_format_0Size : (i + 1) * doc_index_entry_format_0Size ] docIndexEntry = sstruct.unpack( doc_index_entry_format_0, record_data, DocumentIndexEntry() ) entries.append(docIndexEntry) for entry in entries: start = entry.svgDocOffset + subTableStart end = start + entry.svgDocLength doc = data[start:end] compressed = False if doc.startswith(b"\x1f\x8b"): import gzip bytesIO = BytesIO(doc) with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper: doc = gunzipper.read() del bytesIO compressed = True doc = tostr(doc, "utf_8") self.docList.append( SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed) ) def compile(self, ttFont): version = 0 offsetToSVGDocIndex = ( SVG_format_0Size # I start the SVGDocIndex right after the header. ) # get SGVDoc info. 
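# Layout being assembled here: a uint16 entry count, then one
# (startGlyphID, endGlyphID, svgDocOffset, svgDocLength) index record per
# glyph range, then the de-duplicated (and possibly gzipped) document bytes;
# svgDocOffset is relative to the start of the SVG Document Index. A build
# sketch (illustrative; "font" is an open TTFont, the doc string is made up):
#
#     from fontTools.ttLib import newTable
#     from fontTools.ttLib.tables.S_V_G_ import SVGDocument
#     svg = newTable("SVG ")
#     svg.docList = [SVGDocument("<svg>...</svg>", startGlyphID=1, endGlyphID=1)]
#     font["SVG "] = svg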
docList = [] entryList = [] numEntries = len(self.docList) datum = struct.pack(">H", numEntries) entryList.append(datum) curOffset = len(datum) + doc_index_entry_format_0Size * numEntries seenDocs = {} allCompressed = getattr(self, "compressed", False) for i, doc in enumerate(self.docList): if isinstance(doc, (list, tuple)): doc = SVGDocument(*doc) self.docList[i] = doc docBytes = tobytes(doc.data, encoding="utf_8") if (allCompressed or doc.compressed) and not docBytes.startswith( b"\x1f\x8b" ): import gzip bytesIO = BytesIO() # mtime=0 strips the useless timestamp and makes gzip output reproducible; # equivalent to `gzip -n` with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper: gzipper.write(docBytes) gzipped = bytesIO.getvalue() if len(gzipped) < len(docBytes): docBytes = gzipped del gzipped, bytesIO docLength = len(docBytes) if docBytes in seenDocs: docOffset = seenDocs[docBytes] else: docOffset = curOffset curOffset += docLength seenDocs[docBytes] = docOffset docList.append(docBytes) entry = struct.pack( ">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength ) entryList.append(entry) entryList.extend(docList) svgDocData = bytesjoin(entryList) reserved = 0 header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved) data = [header, svgDocData] data = bytesjoin(data) return data def toXML(self, writer, ttFont): for i, doc in enumerate(self.docList): if isinstance(doc, (list, tuple)): doc = SVGDocument(*doc) self.docList[i] = doc attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID} if doc.compressed: attrs["compressed"] = 1 writer.begintag("svgDoc", **attrs) writer.newline() writer.writecdata(doc.data) writer.newline() writer.endtag("svgDoc") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "svgDoc": if not hasattr(self, "docList"): self.docList = [] doc = strjoin(content) doc = doc.strip() startGID = int(attrs["startGlyphID"]) endGID = int(attrs["endGlyphID"]) compressed = bool(safeEval(attrs.get("compressed", "0"))) self.docList.append(SVGDocument(doc, startGID, endGID, compressed)) else: log.warning("Unknown %s %s", name, content) class DocumentIndexEntry(object): def __init__(self): self.startGlyphID = None # USHORT self.endGlyphID = None # USHORT self.svgDocOffset = None # ULONG self.svgDocLength = None # ULONG def __repr__(self): return ( "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength) ) @dataclass class SVGDocument(Sequence): data: str startGlyphID: int endGlyphID: int compressed: bool = False # Previously, the SVG table's docList attribute contained a lists of 3 items: # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute. # For backward compatibility with code that depends of them being sequences of # fixed length=3, we subclass the Sequence abstract base class and pretend only # the first three items are present. 'compressed' is only accessible via named # attribute lookup like regular dataclasses: i.e. `doc.compressed`, not `doc[3]` def __getitem__(self, index): return astuple(self)[:3][index] def __len__(self): return 3 PKaZZZ���U�U�"fontTools/ttLib/tables/S__i_l_f.pyfrom fontTools.misc import sstruct from fontTools.misc.fixedTools import floatToFixedToStr from fontTools.misc.textTools import byteord, safeEval # from itertools import * from . import DefaultTable from . 
import grUtils from array import array from functools import reduce import struct, re, sys Silf_hdr_format = """ > version: 16.16F """ Silf_hdr_format_3 = """ > version: 16.16F compilerVersion: L numSilf: H x x """ Silf_part1_format_v3 = """ > ruleVersion: 16.16F passOffset: H pseudosOffset: H """ Silf_part1_format = """ > maxGlyphID: H extraAscent: h extraDescent: h numPasses: B iSubst: B iPos: B iJust: B iBidi: B flags: B maxPreContext: B maxPostContext: B attrPseudo: B attrBreakWeight: B attrDirectionality: B attrMirroring: B attrSkipPasses: B numJLevels: B """ Silf_justify_format = """ > attrStretch: B attrShrink: B attrStep: B attrWeight: B runto: B x x x """ Silf_part2_format = """ > numLigComp: H numUserDefn: B maxCompPerLig: B direction: B attCollisions: B x x x numCritFeatures: B """ Silf_pseudomap_format = """ > unicode: L nPseudo: H """ Silf_pseudomap_format_h = """ > unicode: H nPseudo: H """ Silf_classmap_format = """ > numClass: H numLinear: H """ Silf_lookupclass_format = """ > numIDs: H searchRange: H entrySelector: H rangeShift: H """ Silf_lookuppair_format = """ > glyphId: H index: H """ Silf_pass_format = """ > flags: B maxRuleLoop: B maxRuleContext: B maxBackup: B numRules: H fsmOffset: H pcCode: L rcCode: L aCode: L oDebug: L numRows: H numTransitional: H numSuccess: H numColumns: H """ aCode_info = ( ("NOP", 0), ("PUSH_BYTE", "b"), ("PUSH_BYTE_U", "B"), ("PUSH_SHORT", ">h"), ("PUSH_SHORT_U", ">H"), ("PUSH_LONG", ">L"), ("ADD", 0), ("SUB", 0), ("MUL", 0), ("DIV", 0), ("MIN", 0), ("MAX", 0), ("NEG", 0), ("TRUNC8", 0), ("TRUNC16", 0), ("COND", 0), ("AND", 0), # x10 ("OR", 0), ("NOT", 0), ("EQUAL", 0), ("NOT_EQ", 0), ("LESS", 0), ("GTR", 0), ("LESS_EQ", 0), ("GTR_EQ", 0), ("NEXT", 0), ("NEXT_N", "b"), ("COPY_NEXT", 0), ("PUT_GLYPH_8BIT_OBS", "B"), ("PUT_SUBS_8BIT_OBS", "bBB"), ("PUT_COPY", "b"), ("INSERT", 0), ("DELETE", 0), # x20 ("ASSOC", -1), ("CNTXT_ITEM", "bB"), ("ATTR_SET", "B"), ("ATTR_ADD", "B"), ("ATTR_SUB", "B"), ("ATTR_SET_SLOT", "B"), ("IATTR_SET_SLOT", "BB"), ("PUSH_SLOT_ATTR", "Bb"), ("PUSH_GLYPH_ATTR_OBS", "Bb"), ("PUSH_GLYPH_METRIC", "Bbb"), ("PUSH_FEAT", "Bb"), ("PUSH_ATT_TO_GATTR_OBS", "Bb"), ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"), ("PUSH_ISLOT_ATTR", "Bbb"), ("PUSH_IGLYPH_ATTR", "Bbb"), ("POP_RET", 0), # x30 ("RET_ZERO", 0), ("RET_TRUE", 0), ("IATTR_SET", "BB"), ("IATTR_ADD", "BB"), ("IATTR_SUB", "BB"), ("PUSH_PROC_STATE", "B"), ("PUSH_VERSION", 0), ("PUT_SUBS", ">bHH"), ("PUT_SUBS2", 0), ("PUT_SUBS3", 0), ("PUT_GLYPH", ">H"), ("PUSH_GLYPH_ATTR", ">Hb"), ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"), ("BITOR", 0), ("BITAND", 0), ("BITNOT", 0), # x40 ("BITSET", ">HH"), ("SET_FEAT", "Bb"), ) aCode_map = dict([(x[0], (i, x[1])) for i, x in enumerate(aCode_info)]) def disassemble(aCode): codelen = len(aCode) pc = 0 res = [] while pc < codelen: opcode = byteord(aCode[pc : pc + 1]) if opcode > len(aCode_info): instr = aCode_info[0] else: instr = aCode_info[opcode] pc += 1 if instr[1] != 0 and pc >= codelen: return res if instr[1] == -1: count = byteord(aCode[pc]) fmt = "%dB" % count pc += 1 elif instr[1] == 0: fmt = "" else: fmt = instr[1] if fmt == "": res.append(instr[0]) continue parms = struct.unpack_from(fmt, aCode[pc:]) res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")") pc += struct.calcsize(fmt) return res instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?") def assemble(instrs): res = b"" for inst in instrs: m = instre.match(inst) if not m or not m.group(1) in aCode_map: continue opcode, parmfmt = aCode_map[m.group(1)] res += struct.pack("B", opcode) if 
m.group(2): if parmfmt == 0: continue parms = [int(x) for x in re.split(r",\s*", m.group(2))] if parmfmt == -1: l = len(parms) res += struct.pack(("%dB" % (l + 1)), l, *parms) else: res += struct.pack(parmfmt, *parms) return res def writecode(tag, writer, instrs): writer.begintag(tag) writer.newline() for l in disassemble(instrs): writer.write(l) writer.newline() writer.endtag(tag) writer.newline() def readcode(content): res = [] for e in content_string(content).split("\n"): e = e.strip() if not len(e): continue res.append(e) return assemble(res) attrs_info = ( "flags", "extraAscent", "extraDescent", "maxGlyphID", "numLigComp", "numUserDefn", "maxCompPerLig", "direction", "lbGID", ) attrs_passindexes = ("iSubst", "iPos", "iJust", "iBidi") attrs_contexts = ("maxPreContext", "maxPostContext") attrs_attributes = ( "attrPseudo", "attrBreakWeight", "attrDirectionality", "attrMirroring", "attrSkipPasses", "attCollisions", ) pass_attrs_info = ( "flags", "maxRuleLoop", "maxRuleContext", "maxBackup", "minRulePreContext", "maxRulePreContext", "collisionThreshold", ) pass_attrs_fsm = ("numRows", "numTransitional", "numSuccess", "numColumns") def writesimple(tag, self, writer, *attrkeys): attrs = dict([(k, getattr(self, k)) for k in attrkeys]) writer.simpletag(tag, **attrs) writer.newline() def getSimple(self, attrs, *attr_list): for k in attr_list: if k in attrs: setattr(self, k, int(safeEval(attrs[k]))) def content_string(contents): res = "" for element in contents: if isinstance(element, tuple): continue res += element return res.strip() def wrapline(writer, dat, length=80): currline = "" for d in dat: if len(currline) > length: writer.write(currline[:-1]) writer.newline() currline = "" currline += d + " " if len(currline): writer.write(currline[:-1]) writer.newline() class _Object: pass class table_S__i_l_f(DefaultTable.DefaultTable): """Silf table support""" def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.silfs = [] def decompile(self, data, ttFont): sstruct.unpack2(Silf_hdr_format, data, self) self.version = float(floatToFixedToStr(self.version, precisionBits=16)) if self.version >= 5.0: (data, self.scheme) = grUtils.decompress(data) sstruct.unpack2(Silf_hdr_format_3, data, self) base = sstruct.calcsize(Silf_hdr_format_3) elif self.version < 3.0: self.numSilf = struct.unpack(">H", data[4:6]) self.scheme = 0 self.compilerVersion = 0 base = 8 else: self.scheme = 0 sstruct.unpack2(Silf_hdr_format_3, data, self) base = sstruct.calcsize(Silf_hdr_format_3) silfoffsets = struct.unpack_from((">%dL" % self.numSilf), data[base:]) for offset in silfoffsets: s = Silf() self.silfs.append(s) s.decompile(data[offset:], ttFont, self.version) def compile(self, ttFont): self.numSilf = len(self.silfs) if self.version < 3.0: hdr = sstruct.pack(Silf_hdr_format, self) hdr += struct.pack(">HH", self.numSilf, 0) else: hdr = sstruct.pack(Silf_hdr_format_3, self) offset = len(hdr) + 4 * self.numSilf data = b"" for s in self.silfs: hdr += struct.pack(">L", offset) subdata = s.compile(ttFont, self.version) offset += len(subdata) data += subdata if self.version >= 5.0: return grUtils.compress(self.scheme, hdr + data) return hdr + data def toXML(self, writer, ttFont): writer.comment("Attributes starting with _ are informative only") writer.newline() writer.simpletag( "version", version=self.version, compilerVersion=self.compilerVersion, compressionScheme=self.scheme, ) writer.newline() for s in self.silfs: writer.begintag("silf") writer.newline() s.toXML(writer, ttFont, self.version) 
writer.endtag("silf") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "version": self.scheme = int(safeEval(attrs["compressionScheme"])) self.version = float(safeEval(attrs["version"])) self.compilerVersion = int(safeEval(attrs["compilerVersion"])) return if name == "silf": s = Silf() self.silfs.append(s) for element in content: if not isinstance(element, tuple): continue tag, attrs, subcontent = element s.fromXML(tag, attrs, subcontent, ttFont, self.version) class Silf(object): """A particular Silf subtable""" def __init__(self): self.passes = [] self.scriptTags = [] self.critFeatures = [] self.jLevels = [] self.pMap = {} def decompile(self, data, ttFont, version=2.0): if version >= 3.0: _, data = sstruct.unpack2(Silf_part1_format_v3, data, self) self.ruleVersion = float( floatToFixedToStr(self.ruleVersion, precisionBits=16) ) _, data = sstruct.unpack2(Silf_part1_format, data, self) for jlevel in range(self.numJLevels): j, data = sstruct.unpack2(Silf_justify_format, data, _Object()) self.jLevels.append(j) _, data = sstruct.unpack2(Silf_part2_format, data, self) if self.numCritFeatures: self.critFeatures = struct.unpack_from( (">%dH" % self.numCritFeatures), data ) data = data[self.numCritFeatures * 2 + 1 :] (numScriptTag,) = struct.unpack_from("B", data) if numScriptTag: self.scriptTags = [ struct.unpack("4s", data[x : x + 4])[0].decode("ascii") for x in range(1, 1 + 4 * numScriptTag, 4) ] data = data[1 + 4 * numScriptTag :] (self.lbGID,) = struct.unpack(">H", data[:2]) if self.numPasses: self.oPasses = struct.unpack( (">%dL" % (self.numPasses + 1)), data[2 : 6 + 4 * self.numPasses] ) data = data[6 + 4 * self.numPasses :] (numPseudo,) = struct.unpack(">H", data[:2]) for i in range(numPseudo): if version >= 3.0: pseudo = sstruct.unpack( Silf_pseudomap_format, data[8 + 6 * i : 14 + 6 * i], _Object() ) else: pseudo = sstruct.unpack( Silf_pseudomap_format_h, data[8 + 4 * i : 12 + 4 * i], _Object() ) self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo) data = data[8 + 6 * numPseudo :] currpos = ( sstruct.calcsize(Silf_part1_format) + sstruct.calcsize(Silf_justify_format) * self.numJLevels + sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures + 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses + 8 + 6 * numPseudo ) if version >= 3.0: currpos += sstruct.calcsize(Silf_part1_format_v3) self.classes = Classes() self.classes.decompile(data, ttFont, version) for i in range(self.numPasses): p = Pass() self.passes.append(p) p.decompile( data[self.oPasses[i] - currpos : self.oPasses[i + 1] - currpos], ttFont, version, ) def compile(self, ttFont, version=2.0): self.numPasses = len(self.passes) self.numJLevels = len(self.jLevels) self.numCritFeatures = len(self.critFeatures) numPseudo = len(self.pMap) data = b"" if version >= 3.0: hdroffset = sstruct.calcsize(Silf_part1_format_v3) else: hdroffset = 0 data += sstruct.pack(Silf_part1_format, self) for j in self.jLevels: data += sstruct.pack(Silf_justify_format, j) data += sstruct.pack(Silf_part2_format, self) if self.numCritFeatures: data += struct.pack((">%dH" % self.numCritFeaturs), *self.critFeatures) data += struct.pack("BB", 0, len(self.scriptTags)) if len(self.scriptTags): tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags] data += b"".join(tdata) data += struct.pack(">H", self.lbGID) self.passOffset = len(data) data1 = grUtils.bininfo(numPseudo, 6) currpos = hdroffset + len(data) + 4 * (self.numPasses + 1) self.pseudosOffset = currpos + len(data1) for u, p in 
sorted(self.pMap.items()): data1 += struct.pack( (">LH" if version >= 3.0 else ">HH"), u, ttFont.getGlyphID(p) ) data1 += self.classes.compile(ttFont, version) currpos += len(data1) data2 = b"" datao = b"" for i, p in enumerate(self.passes): base = currpos + len(data2) datao += struct.pack(">L", base) data2 += p.compile(ttFont, base, version) datao += struct.pack(">L", currpos + len(data2)) if version >= 3.0: data3 = sstruct.pack(Silf_part1_format_v3, self) else: data3 = b"" return data3 + data + datao + data1 + data2 def toXML(self, writer, ttFont, version=2.0): if version >= 3.0: writer.simpletag("version", ruleVersion=self.ruleVersion) writer.newline() writesimple("info", self, writer, *attrs_info) writesimple("passindexes", self, writer, *attrs_passindexes) writesimple("contexts", self, writer, *attrs_contexts) writesimple("attributes", self, writer, *attrs_attributes) if len(self.jLevels): writer.begintag("justifications") writer.newline() jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format) for i, j in enumerate(self.jLevels): attrs = dict([(k, getattr(j, k)) for k in jnames]) writer.simpletag("justify", **attrs) writer.newline() writer.endtag("justifications") writer.newline() if len(self.critFeatures): writer.begintag("critFeatures") writer.newline() writer.write(" ".join(map(str, self.critFeatures))) writer.newline() writer.endtag("critFeatures") writer.newline() if len(self.scriptTags): writer.begintag("scriptTags") writer.newline() writer.write(" ".join(self.scriptTags)) writer.newline() writer.endtag("scriptTags") writer.newline() if self.pMap: writer.begintag("pseudoMap") writer.newline() for k, v in sorted(self.pMap.items()): writer.simpletag("pseudo", unicode=hex(k), pseudo=v) writer.newline() writer.endtag("pseudoMap") writer.newline() self.classes.toXML(writer, ttFont, version) if len(self.passes): writer.begintag("passes") writer.newline() for i, p in enumerate(self.passes): writer.begintag("pass", _index=i) writer.newline() p.toXML(writer, ttFont, version) writer.endtag("pass") writer.newline() writer.endtag("passes") writer.newline() def fromXML(self, name, attrs, content, ttFont, version=2.0): if name == "version": self.ruleVersion = float(safeEval(attrs.get("ruleVersion", "0"))) if name == "info": getSimple(self, attrs, *attrs_info) elif name == "passindexes": getSimple(self, attrs, *attrs_passindexes) elif name == "contexts": getSimple(self, attrs, *attrs_contexts) elif name == "attributes": getSimple(self, attrs, *attrs_attributes) elif name == "justifications": for element in content: if not isinstance(element, tuple): continue (tag, attrs, subcontent) = element if tag == "justify": j = _Object() for k, v in attrs.items(): setattr(j, k, int(v)) self.jLevels.append(j) elif name == "critFeatures": self.critFeatures = [] element = content_string(content) self.critFeatures.extend(map(int, element.split())) elif name == "scriptTags": self.scriptTags = [] element = content_string(content) for n in element.split(): self.scriptTags.append(n) elif name == "pseudoMap": self.pMap = {} for element in content: if not isinstance(element, tuple): continue (tag, attrs, subcontent) = element if tag == "pseudo": k = int(attrs["unicode"], 16) v = attrs["pseudo"] self.pMap[k] = v elif name == "classes": self.classes = Classes() for element in content: if not isinstance(element, tuple): continue tag, attrs, subcontent = element self.classes.fromXML(tag, attrs, subcontent, ttFont, version) elif name == "passes": for element in content: if not isinstance(element, tuple): 
continue tag, attrs, subcontent = element if tag == "pass": p = Pass() for e in subcontent: if not isinstance(e, tuple): continue p.fromXML(e[0], e[1], e[2], ttFont, version) self.passes.append(p) class Classes(object): def __init__(self): self.linear = [] self.nonLinear = [] def decompile(self, data, ttFont, version=2.0): sstruct.unpack2(Silf_classmap_format, data, self) if version >= 4.0: oClasses = struct.unpack( (">%dL" % (self.numClass + 1)), data[4 : 8 + 4 * self.numClass] ) else: oClasses = struct.unpack( (">%dH" % (self.numClass + 1)), data[4 : 6 + 2 * self.numClass] ) for s, e in zip(oClasses[: self.numLinear], oClasses[1 : self.numLinear + 1]): self.linear.append( ttFont.getGlyphName(x) for x in struct.unpack((">%dH" % ((e - s) / 2)), data[s:e]) ) for s, e in zip( oClasses[self.numLinear : self.numClass], oClasses[self.numLinear + 1 : self.numClass + 1], ): nonLinids = [ struct.unpack(">HH", data[x : x + 4]) for x in range(s + 8, e, 4) ] nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids]) self.nonLinear.append(nonLin) def compile(self, ttFont, version=2.0): data = b"" oClasses = [] if version >= 4.0: offset = 8 + 4 * (len(self.linear) + len(self.nonLinear)) else: offset = 6 + 2 * (len(self.linear) + len(self.nonLinear)) for l in self.linear: oClasses.append(len(data) + offset) gs = [ttFont.getGlyphID(x) for x in l] data += struct.pack((">%dH" % len(l)), *gs) for l in self.nonLinear: oClasses.append(len(data) + offset) gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()] data += grUtils.bininfo(len(gs)) data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)]) oClasses.append(len(data) + offset) self.numClass = len(oClasses) - 1 self.numLinear = len(self.linear) return ( sstruct.pack(Silf_classmap_format, self) + struct.pack( ((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), *oClasses ) + data ) def toXML(self, writer, ttFont, version=2.0): writer.begintag("classes") writer.newline() writer.begintag("linearClasses") writer.newline() for i, l in enumerate(self.linear): writer.begintag("linear", _index=i) writer.newline() wrapline(writer, l) writer.endtag("linear") writer.newline() writer.endtag("linearClasses") writer.newline() writer.begintag("nonLinearClasses") writer.newline() for i, l in enumerate(self.nonLinear): writer.begintag("nonLinear", _index=i + self.numLinear) writer.newline() for inp, ind in l.items(): writer.simpletag("map", glyph=inp, index=ind) writer.newline() writer.endtag("nonLinear") writer.newline() writer.endtag("nonLinearClasses") writer.newline() writer.endtag("classes") writer.newline() def fromXML(self, name, attrs, content, ttFont, version=2.0): if name == "linearClasses": for element in content: if not isinstance(element, tuple): continue tag, attrs, subcontent = element if tag == "linear": l = content_string(subcontent).split() self.linear.append(l) elif name == "nonLinearClasses": for element in content: if not isinstance(element, tuple): continue tag, attrs, subcontent = element if tag == "nonLinear": l = {} for e in subcontent: if not isinstance(e, tuple): continue tag, attrs, subsubcontent = e if tag == "map": l[attrs["glyph"]] = int(safeEval(attrs["index"])) self.nonLinear.append(l) class Pass(object): def __init__(self): self.colMap = {} self.rules = [] self.rulePreContexts = [] self.ruleSortKeys = [] self.ruleConstraints = [] self.passConstraints = b"" self.actions = [] self.stateTrans = [] self.startStates = [] def decompile(self, data, ttFont, version=2.0): _, data = sstruct.unpack2(Silf_pass_format, data, 
self) (numRange, _, _, _) = struct.unpack(">4H", data[:8]) data = data[8:] for i in range(numRange): (first, last, col) = struct.unpack(">3H", data[6 * i : 6 * i + 6]) for g in range(first, last + 1): self.colMap[ttFont.getGlyphName(g)] = col data = data[6 * numRange :] oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data) data = data[2 + 2 * self.numSuccess :] rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data) self.rules = [rules[s:e] for (s, e) in zip(oRuleMap, oRuleMap[1:])] data = data[2 * oRuleMap[-1] :] (self.minRulePreContext, self.maxRulePreContext) = struct.unpack("BB", data[:2]) numStartStates = self.maxRulePreContext - self.minRulePreContext + 1 self.startStates = struct.unpack( (">%dH" % numStartStates), data[2 : 2 + numStartStates * 2] ) data = data[2 + numStartStates * 2 :] self.ruleSortKeys = struct.unpack( (">%dH" % self.numRules), data[: 2 * self.numRules] ) data = data[2 * self.numRules :] self.rulePreContexts = struct.unpack( ("%dB" % self.numRules), data[: self.numRules] ) data = data[self.numRules :] (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3]) oConstraints = list( struct.unpack( (">%dH" % (self.numRules + 1)), data[3 : 5 + self.numRules * 2] ) ) data = data[5 + self.numRules * 2 :] oActions = list( struct.unpack((">%dH" % (self.numRules + 1)), data[: 2 + self.numRules * 2]) ) data = data[2 * self.numRules + 2 :] for i in range(self.numTransitional): a = array( "H", data[i * self.numColumns * 2 : (i + 1) * self.numColumns * 2] ) if sys.byteorder != "big": a.byteswap() self.stateTrans.append(a) data = data[self.numTransitional * self.numColumns * 2 + 1 :] self.passConstraints = data[:pConstraint] data = data[pConstraint:] for i in range(len(oConstraints) - 2, -1, -1): if oConstraints[i] == 0: oConstraints[i] = oConstraints[i + 1] self.ruleConstraints = [ (data[s:e] if (e - s > 1) else b"") for (s, e) in zip(oConstraints, oConstraints[1:]) ] data = data[oConstraints[-1] :] self.actions = [ (data[s:e] if (e - s > 1) else "") for (s, e) in zip(oActions, oActions[1:]) ] data = data[oActions[-1] :] # not using debug def compile(self, ttFont, base, version=2.0): # build it all up backwards oActions = reduce( lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.actions + [b""], (0, []) )[1] oConstraints = reduce( lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.ruleConstraints + [b""], (1, []), )[1] constraintCode = b"\000" + b"".join(self.ruleConstraints) transes = [] for t in self.stateTrans: if sys.byteorder != "big": t.byteswap() transes.append(t.tobytes()) if sys.byteorder != "big": t.byteswap() if not len(transes): self.startStates = [0] oRuleMap = reduce( lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.rules + [[]], (0, []) )[1] passRanges = [] gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()]) for e in grUtils.entries(gidcolmap, sameval=True): if e[1]: passRanges.append((e[0], e[0] + e[1] - 1, e[2][0])) self.numRules = len(self.actions) self.fsmOffset = ( sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6 + len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2 + 2 * len(self.startStates) + 3 * self.numRules + 3 + 4 * self.numRules + 4 ) self.pcCode = ( self.fsmOffset + 2 * self.numTransitional * self.numColumns + 1 + base ) self.rcCode = self.pcCode + len(self.passConstraints) self.aCode = self.rcCode + len(constraintCode) self.oDebug = 0 # now generate output data = sstruct.pack(Silf_pass_format, self) data += grUtils.bininfo(len(passRanges), 6) data += b"".join(struct.pack(">3H", *p) for p 
in passRanges) data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap) flatrules = reduce(lambda a, x: a + x, self.rules, []) data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules) data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext) data += struct.pack((">%dH" % len(self.startStates)), *self.startStates) data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys) data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts) data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints)) data += struct.pack((">%dH" % (self.numRules + 1)), *oConstraints) data += struct.pack((">%dH" % (self.numRules + 1)), *oActions) return ( data + b"".join(transes) + struct.pack("B", 0) + self.passConstraints + constraintCode + b"".join(self.actions) ) def toXML(self, writer, ttFont, version=2.0): writesimple("info", self, writer, *pass_attrs_info) writesimple("fsminfo", self, writer, *pass_attrs_fsm) writer.begintag("colmap") writer.newline() wrapline( writer, [ "{}={}".format(*x) for x in sorted( self.colMap.items(), key=lambda x: ttFont.getGlyphID(x[0]) ) ], ) writer.endtag("colmap") writer.newline() writer.begintag("staterulemap") writer.newline() for i, r in enumerate(self.rules): writer.simpletag( "state", number=self.numRows - self.numSuccess + i, rules=" ".join(map(str, r)), ) writer.newline() writer.endtag("staterulemap") writer.newline() writer.begintag("rules") writer.newline() for i in range(len(self.actions)): writer.begintag( "rule", index=i, precontext=self.rulePreContexts[i], sortkey=self.ruleSortKeys[i], ) writer.newline() if len(self.ruleConstraints[i]): writecode("constraint", writer, self.ruleConstraints[i]) writecode("action", writer, self.actions[i]) writer.endtag("rule") writer.newline() writer.endtag("rules") writer.newline() if len(self.passConstraints): writecode("passConstraint", writer, self.passConstraints) if len(self.stateTrans): writer.begintag("fsm") writer.newline() writer.begintag("starts") writer.write(" ".join(map(str, self.startStates))) writer.endtag("starts") writer.newline() for i, s in enumerate(self.stateTrans): writer.begintag("row", _i=i) # no newlines here writer.write(" ".join(map(str, s))) writer.endtag("row") writer.newline() writer.endtag("fsm") writer.newline() def fromXML(self, name, attrs, content, ttFont, version=2.0): if name == "info": getSimple(self, attrs, *pass_attrs_info) elif name == "fsminfo": getSimple(self, attrs, *pass_attrs_fsm) elif name == "colmap": e = content_string(content) for w in e.split(): x = w.split("=") if len(x) != 2 or x[0] == "" or x[1] == "": continue self.colMap[x[0]] = int(x[1]) elif name == "staterulemap": for e in content: if not isinstance(e, tuple): continue tag, a, c = e if tag == "state": self.rules.append([int(x) for x in a["rules"].split(" ")]) elif name == "rules": for element in content: if not isinstance(element, tuple): continue tag, a, c = element if tag != "rule": continue self.rulePreContexts.append(int(a["precontext"])) self.ruleSortKeys.append(int(a["sortkey"])) con = b"" act = b"" for e in c: if not isinstance(e, tuple): continue tag, a, subc = e if tag == "constraint": con = readcode(subc) elif tag == "action": act = readcode(subc) self.actions.append(act) self.ruleConstraints.append(con) elif name == "passConstraint": self.passConstraints = readcode(content) elif name == "fsm": for element in content: if not isinstance(element, tuple): continue tag, a, c = element if tag == "row": s = array("H") e = content_string(c) s.extend(map(int, e.split())) 
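# (Each row collected here is a uint16 array; compile() byte-swaps it to
# big-endian on little-endian hosts before calling tobytes(), so e.g.
# array("H", [0, 3]) is serialized as b"\x00\x00\x00\x03".)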
self.stateTrans.append(s) elif tag == "starts": s = [] e = content_string(c) s.extend(map(int, e.split())) self.startStates = s PKaZZZo��; "fontTools/ttLib/tables/S__i_l_l.pyfrom fontTools.misc import sstruct from fontTools.misc.fixedTools import floatToFixedToStr from fontTools.misc.textTools import safeEval from . import DefaultTable from . import grUtils import struct Sill_hdr = """ > version: 16.16F """ class table_S__i_l_l(DefaultTable.DefaultTable): def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.langs = {} def decompile(self, data, ttFont): (_, data) = sstruct.unpack2(Sill_hdr, data, self) self.version = float(floatToFixedToStr(self.version, precisionBits=16)) (numLangs,) = struct.unpack(">H", data[:2]) data = data[8:] maxsetting = 0 langinfo = [] for i in range(numLangs): (langcode, numsettings, offset) = struct.unpack( ">4sHH", data[i * 8 : (i + 1) * 8] ) offset = int(offset / 8) - (numLangs + 1) langcode = langcode.replace(b"\000", b"") langinfo.append((langcode.decode("utf-8"), numsettings, offset)) maxsetting = max(maxsetting, offset + numsettings) data = data[numLangs * 8 :] finfo = [] for i in range(maxsetting): (fid, val, _) = struct.unpack(">LHH", data[i * 8 : (i + 1) * 8]) finfo.append((fid, val)) self.langs = {} for c, n, o in langinfo: self.langs[c] = [] for i in range(o, o + n): self.langs[c].append(finfo[i]) def compile(self, ttFont): ldat = b"" fdat = b"" offset = len(self.langs) for c, inf in sorted(self.langs.items()): ldat += struct.pack(">4sHH", c.encode("utf8"), len(inf), 8 * offset + 20) for fid, val in inf: fdat += struct.pack(">LHH", fid, val, 0) offset += len(inf) ldat += struct.pack(">LHH", 0x80808080, 0, 8 * offset + 20) return ( sstruct.pack(Sill_hdr, self) + grUtils.bininfo(len(self.langs)) + ldat + fdat ) def toXML(self, writer, ttFont): writer.simpletag("version", version=self.version) writer.newline() for c, inf in sorted(self.langs.items()): writer.begintag("lang", name=c) writer.newline() for fid, val in inf: writer.simpletag("feature", fid=grUtils.num2tag(fid), val=val) writer.newline() writer.endtag("lang") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "version": self.version = float(safeEval(attrs["version"])) elif name == "lang": c = attrs["name"] self.langs[c] = [] for element in content: if not isinstance(element, tuple): continue tag, a, subcontent = element if tag == "feature": self.langs[c].append( (grUtils.tag2num(a["fid"]), int(safeEval(a["val"]))) ) PKaZZZ\�X�VV"fontTools/ttLib/tables/T_S_I_B_.pyfrom .T_S_I_V_ import table_T_S_I_V_ class table_T_S_I_B_(table_T_S_I_V_): pass PKaZZZ�5?0XX"fontTools/ttLib/tables/T_S_I_C_.pyfrom .otBase import BaseTTXConverter class table_T_S_I_C_(BaseTTXConverter): pass PKaZZZ�]9VV"fontTools/ttLib/tables/T_S_I_D_.pyfrom .T_S_I_V_ import table_T_S_I_V_ class table_T_S_I_D_(table_T_S_I_V_): pass PKaZZZ�H� VV"fontTools/ttLib/tables/T_S_I_J_.pyfrom .T_S_I_V_ import table_T_S_I_V_ class table_T_S_I_J_(table_T_S_I_V_): pass PKaZZZ���VV"fontTools/ttLib/tables/T_S_I_P_.pyfrom .T_S_I_V_ import table_T_S_I_V_ class table_T_S_I_P_(table_T_S_I_V_): pass PKaZZZ}ٗkVV"fontTools/ttLib/tables/T_S_I_S_.pyfrom .T_S_I_V_ import table_T_S_I_V_ class table_T_S_I_S_(table_T_S_I_V_): pass PKaZZZ˷:ď�"fontTools/ttLib/tables/T_S_I_V_.pyfrom fontTools.misc.textTools import strjoin, tobytes, tostr from . import asciiTable class table_T_S_I_V_(asciiTable.asciiTable): def toXML(self, writer, ttFont): data = tostr(self.data) # removing null bytes. XXX needed?? 
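# (The stored source may contain embedded NUL bytes; the split/rejoin
# below simply strips every "\0" before the text is written to XML.)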
data = data.split("\0") data = strjoin(data) writer.begintag("source") writer.newline() writer.write_noindent(data.replace("\r", "\n")) writer.newline() writer.endtag("source") writer.newline() def fromXML(self, name, attrs, content, ttFont): lines = strjoin(content).split("\n") self.data = tobytes("\r".join(lines[1:-1])) PKaZZZs��[��"fontTools/ttLib/tables/T_S_I__0.py""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) tool to store its hinting source data. TSI0 is the index table containing the lengths and offsets for the glyph programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained in the TSI1 table. """ from . import DefaultTable import struct tsi0Format = ">HHL" def fixlongs(glyphID, textLength, textOffset): return int(glyphID), int(textLength), textOffset class table_T_S_I__0(DefaultTable.DefaultTable): dependencies = ["TSI1"] def decompile(self, data, ttFont): numGlyphs = ttFont["maxp"].numGlyphs indices = [] size = struct.calcsize(tsi0Format) for i in range(numGlyphs + 5): glyphID, textLength, textOffset = fixlongs( *struct.unpack(tsi0Format, data[:size]) ) indices.append((glyphID, textLength, textOffset)) data = data[size:] assert len(data) == 0 assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number" self.indices = indices[:-5] self.extra_indices = indices[-4:] def compile(self, ttFont): if not hasattr(self, "indices"): # We have no corresponding table (TSI1 or TSI3); let's return # no data, which effectively means "ignore us". return b"" data = b"" for index, textLength, textOffset in self.indices: data = data + struct.pack(tsi0Format, index, textLength, textOffset) data = data + struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34) for index, textLength, textOffset in self.extra_indices: data = data + struct.pack(tsi0Format, index, textLength, textOffset) return data def set(self, indices, extra_indices): # gets called by 'TSI1' or 'TSI3' self.indices = indices self.extra_indices = extra_indices def toXML(self, writer, ttFont): writer.comment("This table will be calculated by the compiler") writer.newline() PKaZZZ�Z�6FF"fontTools/ttLib/tables/T_S_I__1.py""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) tool to store its hinting source data. TSI1 contains the text of the glyph programs in the form of low-level assembly code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'. """ from . import DefaultTable from fontTools.misc.loggingTools import LogMixin from fontTools.misc.textTools import strjoin, tobytes, tostr class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable): extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"} indextable = "TSI0" def decompile(self, data, ttFont): totalLength = len(data) indextable = ttFont[self.indextable] for indices, isExtra in zip( (indextable.indices, indextable.extra_indices), (False, True) ): programs = {} for i, (glyphID, textLength, textOffset) in enumerate(indices): if isExtra: name = self.extras[glyphID] else: name = ttFont.getGlyphName(glyphID) if textOffset > totalLength: self.log.warning("textOffset > totalLength; %r skipped" % name) continue if textLength < 0x8000: # If the length stored in the record is less than 32768, then use # that as the length of the record. 
pass elif textLength == 0x8000: # If the length is 32768, compute the actual length as follows: isLast = i == (len(indices) - 1) if isLast: if isExtra: # For the last "extra" record (the very last record of the # table), the length is the difference between the total # length of the TSI1 table and the textOffset of the final # record. nextTextOffset = totalLength else: # For the last "normal" record (the last record just prior # to the record containing the "magic number"), the length # is the difference between the textOffset of the record # following the "magic number" (0xFFFE) record (i.e. the # first "extra" record), and the textOffset of the last # "normal" record. nextTextOffset = indextable.extra_indices[0][2] else: # For all other records with a length of 0x8000, the length is # the difference between the textOffset of the record in # question and the textOffset of the next record. nextTextOffset = indices[i + 1][2] assert nextTextOffset >= textOffset, "entries not sorted by offset" if nextTextOffset > totalLength: self.log.warning( "nextTextOffset > totalLength; %r truncated" % name ) nextTextOffset = totalLength textLength = nextTextOffset - textOffset else: from fontTools import ttLib raise ttLib.TTLibError( "%r textLength (%d) must not be > 32768" % (name, textLength) ) text = data[textOffset : textOffset + textLength] assert len(text) == textLength text = tostr(text, encoding="utf-8") if text: programs[name] = text if isExtra: self.extraPrograms = programs else: self.glyphPrograms = programs def compile(self, ttFont): if not hasattr(self, "glyphPrograms"): self.glyphPrograms = {} self.extraPrograms = {} data = b"" indextable = ttFont[self.indextable] glyphNames = ttFont.getGlyphOrder() indices = [] for i in range(len(glyphNames)): if len(data) % 2: data = ( data + b"\015" ) # align on 2-byte boundaries, fill with return chars. Yum. name = glyphNames[i] if name in self.glyphPrograms: text = tobytes(self.glyphPrograms[name], encoding="utf-8") else: text = b"" textLength = len(text) if textLength >= 0x8000: textLength = 0x8000 indices.append((i, textLength, len(data))) data = data + text extra_indices = [] codes = sorted(self.extras.items()) for i in range(len(codes)): if len(data) % 2: data = ( data + b"\015" ) # align on 2-byte boundaries, fill with return chars. 
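# Sketch of the length rule implemented in decompile() above; this
# helper is hypothetical, not part of fontTools. Stored lengths below
# 0x8000 are literal; the 0x8000 sentinel means "derive the real length
# from the next record's offset".
def _realTextLength(textLength, textOffset, nextTextOffset):
    if textLength < 0x8000:
        return textLength
    if textLength == 0x8000:
        return nextTextOffset - textOffset
    raise ValueError("textLength must not exceed 0x8000")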
code, name = codes[i] if name in self.extraPrograms: text = tobytes(self.extraPrograms[name], encoding="utf-8") else: text = b"" textLength = len(text) if textLength >= 0x8000: textLength = 0x8000 extra_indices.append((code, textLength, len(data))) data = data + text indextable.set(indices, extra_indices) return data def toXML(self, writer, ttFont): names = sorted(self.glyphPrograms.keys()) writer.newline() for name in names: text = self.glyphPrograms[name] if not text: continue writer.begintag("glyphProgram", name=name) writer.newline() writer.write_noindent(text.replace("\r", "\n")) writer.newline() writer.endtag("glyphProgram") writer.newline() writer.newline() extra_names = sorted(self.extraPrograms.keys()) for name in extra_names: text = self.extraPrograms[name] if not text: continue writer.begintag("extraProgram", name=name) writer.newline() writer.write_noindent(text.replace("\r", "\n")) writer.newline() writer.endtag("extraProgram") writer.newline() writer.newline() def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "glyphPrograms"): self.glyphPrograms = {} self.extraPrograms = {} lines = strjoin(content).replace("\r", "\n").split("\n") text = "\r".join(lines[1:-1]) if name == "glyphProgram": self.glyphPrograms[attrs["name"]] = text elif name == "extraProgram": self.extraPrograms[attrs["name"]] = text PKaZZZ�����"fontTools/ttLib/tables/T_S_I__2.py""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) tool to store its hinting source data. TSI2 is the index table containing the lengths and offsets for the glyph programs that are contained in the TSI3 table. It uses the same format as the TSI0 table. """ from fontTools import ttLib superclass = ttLib.getTableClass("TSI0") class table_T_S_I__2(superclass): dependencies = ["TSI3"] PKaZZZi�����"fontTools/ttLib/tables/T_S_I__3.py""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) tool to store its hinting source data. TSI3 contains the text of the glyph programs in the form of 'VTTTalk' code. """ from fontTools import ttLib superclass = ttLib.getTableClass("TSI1") class table_T_S_I__3(superclass): extras = { 0xFFFA: "reserved0", 0xFFFB: "reserved1", 0xFFFC: "reserved2", 0xFFFD: "reserved3", } indextable = "TSI2" PKaZZZ������"fontTools/ttLib/tables/T_S_I__5.py""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) tool to store its hinting source data. TSI5 contains the VTT character groups. """ from fontTools.misc.textTools import safeEval from . 
import DefaultTable import sys import array class table_T_S_I__5(DefaultTable.DefaultTable): def decompile(self, data, ttFont): numGlyphs = ttFont["maxp"].numGlyphs assert len(data) == 2 * numGlyphs a = array.array("H") a.frombytes(data) if sys.byteorder != "big": a.byteswap() self.glyphGrouping = {} for i in range(numGlyphs): self.glyphGrouping[ttFont.getGlyphName(i)] = a[i] def compile(self, ttFont): glyphNames = ttFont.getGlyphOrder() a = array.array("H") for i in range(len(glyphNames)): a.append(self.glyphGrouping.get(glyphNames[i], 0)) if sys.byteorder != "big": a.byteswap() return a.tobytes() def toXML(self, writer, ttFont): names = sorted(self.glyphGrouping.keys()) for glyphName in names: writer.simpletag( "glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName] ) writer.newline() def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "glyphGrouping"): self.glyphGrouping = {} if name != "glyphgroup": return self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"]) PKaZZZ'�(QQ"fontTools/ttLib/tables/T_T_F_A_.pyfrom . import asciiTable class table_T_T_F_A_(asciiTable.asciiTable): pass PKaZZZ�d� CsCs(fontTools/ttLib/tables/TupleVariation.pyfrom fontTools.misc.fixedTools import ( fixedToFloat as fi2fl, floatToFixed as fl2fi, floatToFixedToStr as fl2str, strToFixedToFloat as str2fl, otRound, ) from fontTools.misc.textTools import safeEval import array from collections import Counter, defaultdict import io import logging import struct import sys # https://www.microsoft.com/typography/otspec/otvarcommonformats.htm EMBEDDED_PEAK_TUPLE = 0x8000 INTERMEDIATE_REGION = 0x4000 PRIVATE_POINT_NUMBERS = 0x2000 DELTAS_ARE_ZERO = 0x80 DELTAS_ARE_WORDS = 0x40 DELTA_RUN_COUNT_MASK = 0x3F POINTS_ARE_WORDS = 0x80 POINT_RUN_COUNT_MASK = 0x7F TUPLES_SHARE_POINT_NUMBERS = 0x8000 TUPLE_COUNT_MASK = 0x0FFF TUPLE_INDEX_MASK = 0x0FFF log = logging.getLogger(__name__) class TupleVariation(object): def __init__(self, axes, coordinates): self.axes = axes.copy() self.coordinates = list(coordinates) def __repr__(self): axes = ",".join( sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()]) ) return "<TupleVariation %s %s>" % (axes, self.coordinates) def __eq__(self, other): return self.coordinates == other.coordinates and self.axes == other.axes def getUsedPoints(self): # Empty set means "all points used". if None not in self.coordinates: return frozenset() used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None]) # Return None if no points used. return used if used else None def hasImpact(self): """Returns True if this TupleVariation has any visible impact. If the result is False, the TupleVariation can be omitted from the font without making any visible difference. 
""" return any(c is not None for c in self.coordinates) def toXML(self, writer, axisTags): writer.begintag("tuple") writer.newline() for axis in axisTags: value = self.axes.get(axis) if value is not None: minValue, value, maxValue = value defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 if minValue == defaultMinValue and maxValue == defaultMaxValue: writer.simpletag("coord", axis=axis, value=fl2str(value, 14)) else: attrs = [ ("axis", axis), ("min", fl2str(minValue, 14)), ("value", fl2str(value, 14)), ("max", fl2str(maxValue, 14)), ] writer.simpletag("coord", attrs) writer.newline() wrote_any_deltas = False for i, delta in enumerate(self.coordinates): if type(delta) == tuple and len(delta) == 2: writer.simpletag("delta", pt=i, x=delta[0], y=delta[1]) writer.newline() wrote_any_deltas = True elif type(delta) == int: writer.simpletag("delta", cvt=i, value=delta) writer.newline() wrote_any_deltas = True elif delta is not None: log.error("bad delta format") writer.comment("bad delta #%d" % i) writer.newline() wrote_any_deltas = True if not wrote_any_deltas: writer.comment("no deltas") writer.newline() writer.endtag("tuple") writer.newline() def fromXML(self, name, attrs, _content): if name == "coord": axis = attrs["axis"] value = str2fl(attrs["value"], 14) defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 minValue = str2fl(attrs.get("min", defaultMinValue), 14) maxValue = str2fl(attrs.get("max", defaultMaxValue), 14) self.axes[axis] = (minValue, value, maxValue) elif name == "delta": if "pt" in attrs: point = safeEval(attrs["pt"]) x = safeEval(attrs["x"]) y = safeEval(attrs["y"]) self.coordinates[point] = (x, y) elif "cvt" in attrs: cvt = safeEval(attrs["cvt"]) value = safeEval(attrs["value"]) self.coordinates[cvt] = value else: log.warning("bad delta format: %s" % ", ".join(sorted(attrs.keys()))) def compile(self, axisTags, sharedCoordIndices={}, pointData=None): assert set(self.axes.keys()) <= set(axisTags), ( "Unknown axis tag found.", self.axes.keys(), axisTags, ) tupleData = [] auxData = [] if pointData is None: usedPoints = self.getUsedPoints() if usedPoints is None: # Nothing to encode return b"", b"" pointData = self.compilePoints(usedPoints) coord = self.compileCoord(axisTags) flags = sharedCoordIndices.get(coord) if flags is None: flags = EMBEDDED_PEAK_TUPLE tupleData.append(coord) intermediateCoord = self.compileIntermediateCoord(axisTags) if intermediateCoord is not None: flags |= INTERMEDIATE_REGION tupleData.append(intermediateCoord) # pointData of b'' implies "use shared points". 
if pointData: flags |= PRIVATE_POINT_NUMBERS auxData.append(pointData) auxData.append(self.compileDeltas()) auxData = b"".join(auxData) tupleData.insert(0, struct.pack(">HH", len(auxData), flags)) return b"".join(tupleData), auxData def compileCoord(self, axisTags): result = [] axes = self.axes for axis in axisTags: triple = axes.get(axis) if triple is None: result.append(b"\0\0") else: result.append(struct.pack(">h", fl2fi(triple[1], 14))) return b"".join(result) def compileIntermediateCoord(self, axisTags): needed = False for axis in axisTags: minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): needed = True break if not needed: return None minCoords = [] maxCoords = [] for axis in axisTags: minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) minCoords.append(struct.pack(">h", fl2fi(minValue, 14))) maxCoords.append(struct.pack(">h", fl2fi(maxValue, 14))) return b"".join(minCoords + maxCoords) @staticmethod def decompileCoord_(axisTags, data, offset): coord = {} pos = offset for axis in axisTags: coord[axis] = fi2fl(struct.unpack(">h", data[pos : pos + 2])[0], 14) pos += 2 return coord, pos @staticmethod def compilePoints(points): # If the set consists of all points in the glyph, it gets encoded with # a special encoding: a single zero byte. # # To use this optimization, points passed in must be empty set. # The following two lines are not strictly necessary as the main code # below would emit the same. But this is most common and faster. if not points: return b"\0" # In the 'gvar' table, the packing of point numbers is a little surprising. # It consists of multiple runs, each being a delta-encoded list of integers. # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. # There are two types of runs, with values being either 8 or 16 bit unsigned # integers. points = list(points) points.sort() numPoints = len(points) result = bytearray() # The binary representation starts with the total number of points in the set, # encoded into one or two bytes depending on the value. if numPoints < 0x80: result.append(numPoints) else: result.append((numPoints >> 8) | 0x80) result.append(numPoints & 0xFF) MAX_RUN_LENGTH = 127 pos = 0 lastValue = 0 while pos < numPoints: runLength = 0 headerPos = len(result) result.append(0) useByteEncoding = None while pos < numPoints and runLength <= MAX_RUN_LENGTH: curValue = points[pos] delta = curValue - lastValue if useByteEncoding is None: useByteEncoding = 0 <= delta <= 0xFF if useByteEncoding and (delta > 0xFF or delta < 0): # we need to start a new run (which will not use byte encoding) break # TODO This never switches back to a byte-encoding from a short-encoding. # That's suboptimal. 
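# Worked example (hypothetical helper) of the packing described above:
# the point set {17, ..., 23} has 7 points, so the output is the count
# byte 7, the run header 6 (run length minus one, byte-sized deltas),
# then the delta-encoded values 17, 1, 1, 1, 1, 1, 1.
def _demoCompilePoints():
    packed = TupleVariation.compilePoints({17, 18, 19, 20, 21, 22, 23})
    assert bytes(packed) == bytes([7, 6, 17, 1, 1, 1, 1, 1, 1])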
if useByteEncoding: result.append(delta) else: result.append(delta >> 8) result.append(delta & 0xFF) lastValue = curValue pos += 1 runLength += 1 if useByteEncoding: result[headerPos] = runLength - 1 else: result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS return result @staticmethod def decompilePoints_(numPoints, data, offset, tableTag): """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)""" assert tableTag in ("cvar", "gvar") pos = offset numPointsInData = data[pos] pos += 1 if (numPointsInData & POINTS_ARE_WORDS) != 0: numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos] pos += 1 if numPointsInData == 0: return (range(numPoints), pos) result = [] while len(result) < numPointsInData: runHeader = data[pos] pos += 1 numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 point = 0 if (runHeader & POINTS_ARE_WORDS) != 0: points = array.array("H") pointsSize = numPointsInRun * 2 else: points = array.array("B") pointsSize = numPointsInRun points.frombytes(data[pos : pos + pointsSize]) if sys.byteorder != "big": points.byteswap() assert len(points) == numPointsInRun pos += pointsSize result.extend(points) # Convert relative to absolute absolute = [] current = 0 for delta in result: current += delta absolute.append(current) result = absolute del absolute badPoints = {str(p) for p in result if p < 0 or p >= numPoints} if badPoints: log.warning( "point %s out of range in '%s' table" % (",".join(sorted(badPoints)), tableTag) ) return (result, pos) def compileDeltas(self): deltaX = [] deltaY = [] if self.getCoordWidth() == 2: for c in self.coordinates: if c is None: continue deltaX.append(c[0]) deltaY.append(c[1]) else: for c in self.coordinates: if c is None: continue deltaX.append(c) bytearr = bytearray() self.compileDeltaValues_(deltaX, bytearr) self.compileDeltaValues_(deltaY, bytearr) return bytearr @staticmethod def compileDeltaValues_(deltas, bytearr=None): """[value1, value2, value3, ...] --> bytearray Emits a sequence of runs. Each run starts with a byte-sized header whose 6 least significant bits (header & 0x3F) indicate how many values are encoded in this run. The stored length is the actual length minus one; run lengths are thus in the range [1..64]. If the header byte has its most significant bit (0x80) set, all values in this run are zero, and no data follows. Otherwise, the header byte is followed by ((header & 0x3F) + 1) signed values. If (header & 0x40) is clear, the delta values are stored as signed bytes; if (header & 0x40) is set, the delta values are signed 16-bit integers. """ # Explaining the format because the 'gvar' spec is hard to understand. 
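# Worked example (hypothetical helper) of the run format documented in
# the docstring above: six zero deltas collapse to a single header byte
# (0x80 | 5), and [1, 2, 3] becomes a byte-sized run 0x02 01 02 03.
def _demoDeltaRuns():
    out = TupleVariation.compileDeltaValues_([0, 0, 0, 0, 0, 0, 1, 2, 3])
    assert bytes(out) == bytes([0x85, 0x02, 0x01, 0x02, 0x03])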
        if bytearr is None:
            bytearr = bytearray()
        pos = 0
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]
            if value == 0:
                pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
            elif -128 <= value <= 127:
                pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
            else:
                pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
        return bytearr

    @staticmethod
    def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas and deltas[pos] == 0:
            pos += 1
        runLength = pos - offset
        while runLength >= 64:
            bytearr.append(DELTAS_ARE_ZERO | 63)
            runLength -= 64
        if runLength:
            bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
        return pos

    @staticmethod
    def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]
            if not (-128 <= value <= 127):
                break
            # Within a byte-encoded run of deltas, a single zero
            # is best stored literally as a 0x00 value. However,
            # if there are two or more zeroes in a sequence, it is
            # better to start a new run. For example, the sequence
            # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
            # (04 0F 0F 00 0F 0F) when storing the zero value
            # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
            # when starting a new run.
            if value == 0 and pos + 1 < numDeltas and deltas[pos + 1] == 0:
                break
            pos += 1
        runLength = pos - offset
        while runLength >= 64:
            bytearr.append(63)
            bytearr.extend(array.array("b", deltas[offset : offset + 64]))
            offset += 64
            runLength -= 64
        if runLength:
            bytearr.append(runLength - 1)
            bytearr.extend(array.array("b", deltas[offset:pos]))
        return pos

    @staticmethod
    def encodeDeltaRunAsWords_(deltas, offset, bytearr):
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]
            # Within a word-encoded run of deltas, it is easiest
            # to start a new run (with a different encoding)
            # whenever we encounter a zero value. For example,
            # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
            # storing the zero literally (42 66 66 00 00 77 77),
            # and equally 7 bytes when starting a new run
            # (40 66 66 80 40 77 77).
            if value == 0:
                break
            # Within a word-encoded run of deltas, a single value
            # in the range (-128..127) should be encoded literally
            # because it is more compact. For example, the sequence
            # [0x6666, 2, 0x7777] becomes 7 bytes when storing
            # the value literally (42 66 66 00 02 77 77), but 8 bytes
            # when starting a new run (40 66 66 00 02 40 77 77).
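# Companion example (hypothetical helper) for the word-sized case:
# [0x6666, 0x7777] is stored as the header (DELTAS_ARE_WORDS | 1)
# followed by two big-endian int16 values.
def _demoWordRun():
    out = bytearray()
    TupleVariation.encodeDeltaRunAsWords_([0x6666, 0x7777], 0, out)
    assert bytes(out) == bytes([0x41, 0x66, 0x66, 0x77, 0x77])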
if ( (-128 <= value <= 127) and pos + 1 < numDeltas and (-128 <= deltas[pos + 1] <= 127) ): break pos += 1 runLength = pos - offset while runLength >= 64: bytearr.append(DELTAS_ARE_WORDS | 63) a = array.array("h", deltas[offset : offset + 64]) if sys.byteorder != "big": a.byteswap() bytearr.extend(a) offset += 64 runLength -= 64 if runLength: bytearr.append(DELTAS_ARE_WORDS | (runLength - 1)) a = array.array("h", deltas[offset:pos]) if sys.byteorder != "big": a.byteswap() bytearr.extend(a) return pos @staticmethod def decompileDeltas_(numDeltas, data, offset): """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" result = [] pos = offset while len(result) < numDeltas: runHeader = data[pos] pos += 1 numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 if (runHeader & DELTAS_ARE_ZERO) != 0: result.extend([0] * numDeltasInRun) else: if (runHeader & DELTAS_ARE_WORDS) != 0: deltas = array.array("h") deltasSize = numDeltasInRun * 2 else: deltas = array.array("b") deltasSize = numDeltasInRun deltas.frombytes(data[pos : pos + deltasSize]) if sys.byteorder != "big": deltas.byteswap() assert len(deltas) == numDeltasInRun pos += deltasSize result.extend(deltas) assert len(result) == numDeltas return (result, pos) @staticmethod def getTupleSize_(flags, axisCount): size = 4 if (flags & EMBEDDED_PEAK_TUPLE) != 0: size += axisCount * 2 if (flags & INTERMEDIATE_REGION) != 0: size += axisCount * 4 return size def getCoordWidth(self): """Return 2 if coordinates are (x, y) as in gvar, 1 if single values as in cvar, or 0 if empty. """ firstDelta = next((c for c in self.coordinates if c is not None), None) if firstDelta is None: return 0 # empty or has no impact if type(firstDelta) in (int, float): return 1 if type(firstDelta) is tuple and len(firstDelta) == 2: return 2 raise TypeError( "invalid type of delta; expected (int or float) number, or " "Tuple[number, number]: %r" % firstDelta ) def scaleDeltas(self, scalar): if scalar == 1.0: return # no change coordWidth = self.getCoordWidth() self.coordinates = [ ( None if d is None else d * scalar if coordWidth == 1 else (d[0] * scalar, d[1] * scalar) ) for d in self.coordinates ] def roundDeltas(self): coordWidth = self.getCoordWidth() self.coordinates = [ ( None if d is None else otRound(d) if coordWidth == 1 else (otRound(d[0]), otRound(d[1])) ) for d in self.coordinates ] def calcInferredDeltas(self, origCoords, endPts): from fontTools.varLib.iup import iup_delta if self.getCoordWidth() == 1: raise TypeError("Only 'gvar' TupleVariation can have inferred deltas") if None in self.coordinates: if len(self.coordinates) != len(origCoords): raise ValueError( "Expected len(origCoords) == %d; found %d" % (len(self.coordinates), len(origCoords)) ) self.coordinates = iup_delta(self.coordinates, origCoords, endPts) def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False): from fontTools.varLib.iup import iup_delta_optimize if None in self.coordinates: return # already optimized deltaOpt = iup_delta_optimize( self.coordinates, origCoords, endPts, tolerance=tolerance ) if None in deltaOpt: if isComposite and all(d is None for d in deltaOpt): # Fix for macOS composites # https://github.com/fonttools/fonttools/issues/1381 deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1) # Use "optimized" version only if smaller... varOpt = TupleVariation(self.axes, deltaOpt) # Shouldn't matter that this is different from fvar...? 
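# Round-trip sketch (hypothetical helper): the run encoders above
# compose with decompileDeltas_ so that encoding followed by decoding
# is lossless.
def _demoDeltaRoundTrip():
    enc = TupleVariation.compileDeltaValues_([0, 0, 7, -3, 0x6666])
    dec, _ = TupleVariation.decompileDeltas_(5, bytes(enc), 0)
    assert dec == [0, 0, 7, -3, 0x6666]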
axisTags = sorted(self.axes.keys()) tupleData, auxData = self.compile(axisTags) unoptimizedLength = len(tupleData) + len(auxData) tupleData, auxData = varOpt.compile(axisTags) optimizedLength = len(tupleData) + len(auxData) if optimizedLength < unoptimizedLength: self.coordinates = varOpt.coordinates def __imul__(self, scalar): self.scaleDeltas(scalar) return self def __iadd__(self, other): if not isinstance(other, TupleVariation): return NotImplemented deltas1 = self.coordinates length = len(deltas1) deltas2 = other.coordinates if len(deltas2) != length: raise ValueError("cannot sum TupleVariation deltas with different lengths") # 'None' values have different meanings in gvar vs cvar TupleVariations: # within the gvar, when deltas are not provided explicitly for some points, # they need to be inferred; whereas for the 'cvar' table, if deltas are not # provided for some CVT values, then no adjustments are made (i.e. None == 0). # Thus, we cannot sum deltas for gvar TupleVariations if they contain # inferred inferred deltas (the latter need to be computed first using # 'calcInferredDeltas' method), but we can treat 'None' values in cvar # deltas as if they are zeros. if self.getCoordWidth() == 2: for i, d2 in zip(range(length), deltas2): d1 = deltas1[i] try: deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1]) except TypeError: raise ValueError("cannot sum gvar deltas with inferred points") else: for i, d2 in zip(range(length), deltas2): d1 = deltas1[i] if d1 is not None and d2 is not None: deltas1[i] = d1 + d2 elif d1 is None and d2 is not None: deltas1[i] = d2 # elif d2 is None do nothing return self def decompileSharedTuples(axisTags, sharedTupleCount, data, offset): result = [] for _ in range(sharedTupleCount): t, offset = TupleVariation.decompileCoord_(axisTags, data, offset) result.append(t) return result def compileSharedTuples( axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1 ): coordCount = Counter() for var in variations: coord = var.compileCoord(axisTags) coordCount[coord] += 1 # In python < 3.7, most_common() ordering is non-deterministic # so apply a sort to make sure the ordering is consistent. sharedCoords = sorted( coordCount.most_common(MAX_NUM_SHARED_COORDS), key=lambda item: (-item[1], item[0]), ) return [c[0] for c in sharedCoords if c[1] > 1] def compileTupleVariationStore( variations, pointCount, axisTags, sharedTupleIndices, useSharedPoints=True ): # pointCount is actually unused. Keeping for API compat. del pointCount newVariations = [] pointDatas = [] # Compile all points and figure out sharing if desired sharedPoints = None # Collect, count, and compile point-sets for all variation sets pointSetCount = defaultdict(int) for v in variations: points = v.getUsedPoints() if points is None: # Empty variations continue pointSetCount[points] += 1 newVariations.append(v) pointDatas.append(points) variations = newVariations del newVariations if not variations: return (0, b"", b"") n = len(variations[0].coordinates) assert all( len(v.coordinates) == n for v in variations ), "Variation sets have different sizes" compiledPoints = { pointSet: TupleVariation.compilePoints(pointSet) for pointSet in pointSetCount } tupleVariationCount = len(variations) tuples = [] data = [] if useSharedPoints: # Find point-set which saves most bytes. 
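# (Weighting below: sharing a point set saves its encoded size once for
# every variation beyond the first that reuses it, hence
# len(compiledPoints[pointSet]) * (count - 1) bytes saved.)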
def key(pn): pointSet = pn[0] count = pn[1] return len(compiledPoints[pointSet]) * (count - 1) sharedPoints = max(pointSetCount.items(), key=key)[0] data.append(compiledPoints[sharedPoints]) tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS # b'' implies "use shared points" pointDatas = [ compiledPoints[points] if points != sharedPoints else b"" for points in pointDatas ] for v, p in zip(variations, pointDatas): thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p) tuples.append(thisTuple) data.append(thisData) tuples = b"".join(tuples) data = b"".join(data) return tupleVariationCount, tuples, data def decompileTupleVariationStore( tableTag, axisTags, tupleVariationCount, pointCount, sharedTuples, data, pos, dataPos, ): numAxes = len(axisTags) result = [] if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0: sharedPoints, dataPos = TupleVariation.decompilePoints_( pointCount, data, dataPos, tableTag ) else: sharedPoints = [] for _ in range(tupleVariationCount & TUPLE_COUNT_MASK): dataSize, flags = struct.unpack(">HH", data[pos : pos + 4]) tupleSize = TupleVariation.getTupleSize_(flags, numAxes) tupleData = data[pos : pos + tupleSize] pointDeltaData = data[dataPos : dataPos + dataSize] result.append( decompileTupleVariation_( pointCount, sharedTuples, sharedPoints, tableTag, axisTags, tupleData, pointDeltaData, ) ) pos += tupleSize dataPos += dataSize return result def decompileTupleVariation_( pointCount, sharedTuples, sharedPoints, tableTag, axisTags, data, tupleData ): assert tableTag in ("cvar", "gvar"), tableTag flags = struct.unpack(">H", data[2:4])[0] pos = 4 if (flags & EMBEDDED_PEAK_TUPLE) == 0: peak = sharedTuples[flags & TUPLE_INDEX_MASK] else: peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos) if (flags & INTERMEDIATE_REGION) != 0: start, pos = TupleVariation.decompileCoord_(axisTags, data, pos) end, pos = TupleVariation.decompileCoord_(axisTags, data, pos) else: start, end = inferRegion_(peak) axes = {} for axis in axisTags: region = start[axis], peak[axis], end[axis] if region != (0.0, 0.0, 0.0): axes[axis] = region pos = 0 if (flags & PRIVATE_POINT_NUMBERS) != 0: points, pos = TupleVariation.decompilePoints_( pointCount, tupleData, pos, tableTag ) else: points = sharedPoints deltas = [None] * pointCount if tableTag == "cvar": deltas_cvt, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos) for p, delta in zip(points, deltas_cvt): if 0 <= p < pointCount: deltas[p] = delta elif tableTag == "gvar": deltas_x, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos) deltas_y, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos) for p, x, y in zip(points, deltas_x, deltas_y): if 0 <= p < pointCount: deltas[p] = (x, y) return TupleVariation(axes, deltas) def inferRegion_(peak): """Infer start and end for a (non-intermediate) region This helper function computes the applicability region for variation tuples whose INTERMEDIATE_REGION flag is not set in the TupleVariationHeader structure. Variation tuples apply only to certain regions of the variation space; outside that region, the tuple has no effect. To make the binary encoding more compact, TupleVariationHeaders can omit the intermediateStartTuple and intermediateEndTuple fields. """ start, end = {}, {} for axis, value in peak.items(): start[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 end[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 return (start, end) PKaZZZ����'�'"fontTools/ttLib/tables/V_D_M_X_.pyfrom . 
import DefaultTable
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
import struct

VDMX_HeaderFmt = """
    >                 # big endian
    version:     H    # Version number (0 or 1)
    numRecs:     H    # Number of VDMX groups present
    numRatios:   H    # Number of aspect ratio groupings
"""
# the VDMX header is followed by an array of RatRange[numRatios] (i.e. aspect
# ratio ranges);
VDMX_RatRangeFmt = """
    >                 # big endian
    bCharSet:    B    # Character set
    xRatio:      B    # Value to use for x-Ratio
    yStartRatio: B    # Starting y-Ratio value
    yEndRatio:   B    # Ending y-Ratio value
"""
# followed by an array of offset[numRatios] from start of VDMX table to the
# VDMX Group for this ratio range (offsets will be re-calculated on compile);
# followed by an array of Group[numRecs] records;
VDMX_GroupFmt = """
    >                 # big endian
    recs:        H    # Number of height records in this group
    startsz:     B    # Starting yPelHeight
    endsz:       B    # Ending yPelHeight
"""
# followed by an array of vTable[recs] records.
VDMX_vTableFmt = """
    >                 # big endian
    yPelHeight:  H    # yPelHeight to which values apply
    yMax:        h    # Maximum value (in pels) for this yPelHeight
    yMin:        h    # Minimum value (in pels) for this yPelHeight
"""


class table_V_D_M_X_(DefaultTable.DefaultTable):
    def decompile(self, data, ttFont):
        pos = 0  # track current position from the start of the VDMX table
        dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
        pos += sstruct.calcsize(VDMX_HeaderFmt)
        self.ratRanges = []
        for i in range(self.numRatios):
            ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
            pos += sstruct.calcsize(VDMX_RatRangeFmt)
            # the mapping between a ratio and a group is defined further below
            ratio["groupIndex"] = None
            self.ratRanges.append(ratio)
        lenOffset = struct.calcsize(">H")
        _offsets = []  # temporarily store offsets to groups
        for i in range(self.numRatios):
            offset = struct.unpack(">H", data[0:lenOffset])[0]
            data = data[lenOffset:]
            pos += lenOffset
            _offsets.append(offset)
        self.groups = []
        for groupIndex in range(self.numRecs):
            # the offset to this group from the beginning of the VDMX table
            currOffset = pos
            group, data = sstruct.unpack2(VDMX_GroupFmt, data)
            # the group length and bounding sizes are re-calculated on compile
            recs = group.pop("recs")
            startsz = group.pop("startsz")
            endsz = group.pop("endsz")
            pos += sstruct.calcsize(VDMX_GroupFmt)
            for j in range(recs):
                vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
                vTableLength = sstruct.calcsize(VDMX_vTableFmt)
                pos += vTableLength
                # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
                group[vTable["yPelHeight"]] = (vTable["yMax"], vTable["yMin"])
            # make sure startsz and endsz match the calculated values
            minSize = min(group.keys())
            maxSize = max(group.keys())
            assert (
                startsz == minSize
            ), "startsz (%s) must equal min yPelHeight (%s): group %d" % (
                startsz,
                minSize,
                groupIndex,
            )
            assert (
                endsz == maxSize
            ), "endsz (%s) must equal max yPelHeight (%s): group %d" % (
                endsz,
                maxSize,
                groupIndex,
            )
            self.groups.append(group)
            # match the defined offsets with the current group's offset
            for offsetIndex, offsetValue in enumerate(_offsets):
                # when numRecs < numRatios there can be more than one ratio
                # range sharing the same VDMX group
                if currOffset == offsetValue:
                    # map the group to the ratio range that has the same
                    # index as the offset to that group (it took me a while..)
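# Sketch (hypothetical helper, values arbitrary) of the model built
# here: each group maps yPelHeight -> (yMax, yMin), and startsz/endsz
# are simply the min()/max() of its keys, as recomputed in compile().
def _demoVdmxGroup():
    group = {11: (9, -3), 12: (10, -3), 13: (11, -4)}
    assert (min(group), max(group)) == (11, 13)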
self.ratRanges[offsetIndex]["groupIndex"] = groupIndex # check that all ratio ranges have a group for i in range(self.numRatios): ratio = self.ratRanges[i] if ratio["groupIndex"] is None: from fontTools import ttLib raise ttLib.TTLibError("no group defined for ratRange %d" % i) def _getOffsets(self): """ Calculate offsets to VDMX_Group records. For each ratRange return a list of offset values from the beginning of the VDMX table to a VDMX_Group. """ lenHeader = sstruct.calcsize(VDMX_HeaderFmt) lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt) lenOffset = struct.calcsize(">H") lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt) lenVTable = sstruct.calcsize(VDMX_vTableFmt) # offset to the first group pos = lenHeader + self.numRatios * lenRatRange + self.numRatios * lenOffset groupOffsets = [] for group in self.groups: groupOffsets.append(pos) lenGroup = lenGroupHeader + len(group) * lenVTable pos += lenGroup # offset to next group offsets = [] for ratio in self.ratRanges: groupIndex = ratio["groupIndex"] offsets.append(groupOffsets[groupIndex]) return offsets def compile(self, ttFont): if not (self.version == 0 or self.version == 1): from fontTools import ttLib raise ttLib.TTLibError( "unknown format for VDMX table: version %s" % self.version ) data = sstruct.pack(VDMX_HeaderFmt, self) for ratio in self.ratRanges: data += sstruct.pack(VDMX_RatRangeFmt, ratio) # recalculate offsets to VDMX groups for offset in self._getOffsets(): data += struct.pack(">H", offset) for group in self.groups: recs = len(group) startsz = min(group.keys()) endsz = max(group.keys()) gHeader = {"recs": recs, "startsz": startsz, "endsz": endsz} data += sstruct.pack(VDMX_GroupFmt, gHeader) for yPelHeight, (yMax, yMin) in sorted(group.items()): vTable = {"yPelHeight": yPelHeight, "yMax": yMax, "yMin": yMin} data += sstruct.pack(VDMX_vTableFmt, vTable) return data def toXML(self, writer, ttFont): writer.simpletag("version", value=self.version) writer.newline() writer.begintag("ratRanges") writer.newline() for ratio in self.ratRanges: groupIndex = ratio["groupIndex"] writer.simpletag( "ratRange", bCharSet=ratio["bCharSet"], xRatio=ratio["xRatio"], yStartRatio=ratio["yStartRatio"], yEndRatio=ratio["yEndRatio"], groupIndex=groupIndex, ) writer.newline() writer.endtag("ratRanges") writer.newline() writer.begintag("groups") writer.newline() for groupIndex in range(self.numRecs): group = self.groups[groupIndex] recs = len(group) startsz = min(group.keys()) endsz = max(group.keys()) writer.begintag("group", index=groupIndex) writer.newline() writer.comment("recs=%d, startsz=%d, endsz=%d" % (recs, startsz, endsz)) writer.newline() for yPelHeight, (yMax, yMin) in sorted(group.items()): writer.simpletag( "record", [("yPelHeight", yPelHeight), ("yMax", yMax), ("yMin", yMin)], ) writer.newline() writer.endtag("group") writer.newline() writer.endtag("groups") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "version": self.version = safeEval(attrs["value"]) elif name == "ratRanges": if not hasattr(self, "ratRanges"): self.ratRanges = [] for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "ratRange": if not hasattr(self, "numRatios"): self.numRatios = 1 else: self.numRatios += 1 ratio = { "bCharSet": safeEval(attrs["bCharSet"]), "xRatio": safeEval(attrs["xRatio"]), "yStartRatio": safeEval(attrs["yStartRatio"]), "yEndRatio": safeEval(attrs["yEndRatio"]), "groupIndex": safeEval(attrs["groupIndex"]), } self.ratRanges.append(ratio) elif name == "groups": if 
not hasattr(self, "groups"): self.groups = [] for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "group": if not hasattr(self, "numRecs"): self.numRecs = 1 else: self.numRecs += 1 group = {} for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "record": yPelHeight = safeEval(attrs["yPelHeight"]) yMax = safeEval(attrs["yMax"]) yMin = safeEval(attrs["yMin"]) group[yPelHeight] = (yMax, yMin) self.groups.append(group) PKaZZZ�`R���"fontTools/ttLib/tables/V_O_R_G_.pyfrom fontTools.misc.textTools import bytesjoin, safeEval from . import DefaultTable import struct class table_V_O_R_G_(DefaultTable.DefaultTable): """This table is structured so that you can treat it like a dictionary keyed by glyph name. ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph. ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph. """ def decompile(self, data, ttFont): self.getGlyphName = ( ttFont.getGlyphName ) # for use in get/set item functions, for access by GID ( self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics, ) = struct.unpack(">HHhH", data[:8]) assert ( self.majorVersion <= 1 ), "Major version of VORG table is higher than I know how to handle" data = data[8:] vids = [] gids = [] pos = 0 for i in range(self.numVertOriginYMetrics): gid, vOrigin = struct.unpack(">Hh", data[pos : pos + 4]) pos += 4 gids.append(gid) vids.append(vOrigin) self.VOriginRecords = vOrig = {} glyphOrder = ttFont.getGlyphOrder() try: names = [glyphOrder[gid] for gid in gids] except IndexError: getGlyphName = self.getGlyphName names = map(getGlyphName, gids) for name, vid in zip(names, vids): vOrig[name] = vid def compile(self, ttFont): vorgs = list(self.VOriginRecords.values()) names = list(self.VOriginRecords.keys()) nameMap = ttFont.getReverseGlyphMap() try: gids = [nameMap[name] for name in names] except KeyError: nameMap = ttFont.getReverseGlyphMap(rebuild=True) gids = [nameMap[name] for name in names] vOriginTable = list(zip(gids, vorgs)) self.numVertOriginYMetrics = len(vorgs) vOriginTable.sort() # must be in ascending GID order dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable] header = struct.pack( ">HHhH", self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics, ) dataList.insert(0, header) data = bytesjoin(dataList) return data def toXML(self, writer, ttFont): writer.simpletag("majorVersion", value=self.majorVersion) writer.newline() writer.simpletag("minorVersion", value=self.minorVersion) writer.newline() writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY) writer.newline() writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics) writer.newline() vOriginTable = [] glyphNames = self.VOriginRecords.keys() for glyphName in glyphNames: try: gid = ttFont.getGlyphID(glyphName) except: assert 0, ( "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) ) vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]]) vOriginTable.sort() for entry in vOriginTable: vOriginRec = VOriginRecord(entry[1], entry[2]) vOriginRec.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "VOriginRecords"): self.VOriginRecords = {} self.getGlyphName = ( ttFont.getGlyphName ) # for use in get/set item functions, for access by GID if name == "VOriginRecord": vOriginRec = 
VOriginRecord() for element in content: if isinstance(element, str): continue name, attrs, content = element vOriginRec.fromXML(name, attrs, content, ttFont) self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin elif "value" in attrs: setattr(self, name, safeEval(attrs["value"])) def __getitem__(self, glyphSelector): if isinstance(glyphSelector, int): # it's a gid, convert to glyph name glyphSelector = self.getGlyphName(glyphSelector) if glyphSelector not in self.VOriginRecords: return self.defaultVertOriginY return self.VOriginRecords[glyphSelector] def __setitem__(self, glyphSelector, value): if isinstance(glyphSelector, int): # it's a gid, convert to glyph name glyphSelector = self.getGlyphName(glyphSelector) if value != self.defaultVertOriginY: self.VOriginRecords[glyphSelector] = value elif glyphSelector in self.VOriginRecords: del self.VOriginRecords[glyphSelector] def __delitem__(self, glyphSelector): del self.VOriginRecords[glyphSelector] class VOriginRecord(object): def __init__(self, name=None, vOrigin=None): self.glyphName = name self.vOrigin = vOrigin def toXML(self, writer, ttFont): writer.begintag("VOriginRecord") writer.newline() writer.simpletag("glyphName", value=self.glyphName) writer.newline() writer.simpletag("vOrigin", value=self.vOrigin) writer.newline() writer.endtag("VOriginRecord") writer.newline() def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name == "glyphName": setattr(self, name, value) else: setattr(self, name, safeEval(value)) PKaZZZ���!XX"fontTools/ttLib/tables/V_V_A_R_.pyfrom .otBase import BaseTTXConverter class table_V_V_A_R_(BaseTTXConverter): pass PKaZZZ�b�0) ) "fontTools/ttLib/tables/__init__.py# DON'T EDIT! This file is generated by MetaTools/buildTableList.py. def _moduleFinderHint(): """Dummy function to let modulefinder know what tables may be dynamically imported. Generated by MetaTools/buildTableList.py. >>> _moduleFinderHint() """ from . import B_A_S_E_ from . import C_B_D_T_ from . import C_B_L_C_ from . import C_F_F_ from . import C_F_F__2 from . import C_O_L_R_ from . import C_P_A_L_ from . import D_S_I_G_ from . import D__e_b_g from . import E_B_D_T_ from . import E_B_L_C_ from . import F_F_T_M_ from . import F__e_a_t from . import G_D_E_F_ from . import G_M_A_P_ from . import G_P_K_G_ from . import G_P_O_S_ from . import G_S_U_B_ from . import G__l_a_t from . import G__l_o_c from . import H_V_A_R_ from . import J_S_T_F_ from . import L_T_S_H_ from . import M_A_T_H_ from . import M_E_T_A_ from . import M_V_A_R_ from . import O_S_2f_2 from . import S_I_N_G_ from . import S_T_A_T_ from . import S_V_G_ from . import S__i_l_f from . import S__i_l_l from . import T_S_I_B_ from . import T_S_I_C_ from . import T_S_I_D_ from . import T_S_I_J_ from . import T_S_I_P_ from . import T_S_I_S_ from . import T_S_I_V_ from . import T_S_I__0 from . import T_S_I__1 from . import T_S_I__2 from . import T_S_I__3 from . import T_S_I__5 from . import T_T_F_A_ from . import V_D_M_X_ from . import V_O_R_G_ from . import V_V_A_R_ from . import _a_n_k_r from . import _a_v_a_r from . import _b_s_l_n from . import _c_i_d_g from . import _c_m_a_p from . import _c_v_a_r from . import _c_v_t from . import _f_e_a_t from . import _f_p_g_m from . import _f_v_a_r from . import _g_a_s_p from . import _g_c_i_d from . import _g_l_y_f from . import _g_v_a_r from . import _h_d_m_x from . import _h_e_a_d from . import _h_h_e_a from . import _h_m_t_x from . import _k_e_r_n from . import _l_c_a_r from . import _l_o_c_a from . import _l_t_a_g from . 
import _m_a_x_p from . import _m_e_t_a from . import _m_o_r_t from . import _m_o_r_x from . import _n_a_m_e from . import _o_p_b_d from . import _p_o_s_t from . import _p_r_e_p from . import _p_r_o_p from . import _s_b_i_x from . import _t_r_a_k from . import _v_h_e_a from . import _v_m_t_x if __name__ == "__main__": import doctest, sys sys.exit(doctest.testmod().failed) PKaZZZ��D~��"fontTools/ttLib/tables/_a_n_k_r.pyfrom .otBase import BaseTTXConverter class table__a_n_k_r(BaseTTXConverter): """ The anchor point table provides a way to define anchor points. These are points within the coordinate space of a given glyph, independent of the control points used to render the glyph. Anchor points are used in conjunction with the 'kerx' table. See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html """ pass PKaZZZ��Q"fontTools/ttLib/tables/_a_v_a_r.pyfrom fontTools.misc import sstruct from fontTools.misc.fixedTools import ( fixedToFloat as fi2fl, floatToFixed as fl2fi, floatToFixedToStr as fl2str, strToFixedToFloat as str2fl, ) from fontTools.misc.textTools import bytesjoin, safeEval from fontTools.ttLib import TTLibError from . import DefaultTable from . import otTables import struct import logging log = logging.getLogger(__name__) from .otBase import BaseTTXConverter class table__a_v_a_r(BaseTTXConverter): """Axis Variations Table This class represents the ``avar`` table of a variable font. The object has one substantive attribute, ``segments``, which maps axis tags to a segments dictionary:: >>> font["avar"].segments # doctest: +SKIP {'wght': {-1.0: -1.0, 0.0: 0.0, 0.125: 0.11444091796875, 0.25: 0.23492431640625, 0.5: 0.35540771484375, 0.625: 0.5, 0.75: 0.6566162109375, 0.875: 0.81927490234375, 1.0: 1.0}, 'ital': {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}} Notice that the segments dictionary is made up of normalized values. A valid ``avar`` segment mapping must contain the entries ``-1.0: -1.0, 0.0: 0.0, 1.0: 1.0``. fontTools does not enforce this, so it is your responsibility to ensure that mappings are valid. 
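Mappings are applied by piecewise linear interpolation: an input coordinate is warped along the straight line between the two segment entries that bracket it. As an illustrative example (not from a real font), with a ``wght`` segment of ``{-1.0: -1.0, 0.0: 0.0, 0.5: 0.8, 1.0: 1.0}``, an input of 0.25 lies halfway between 0.0 and 0.5 and so maps to 0.4, halfway between 0.0 and 0.8.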
""" dependencies = ["fvar"] def __init__(self, tag=None): super().__init__(tag) self.segments = {} def compile(self, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] if not hasattr(self, "table"): self.table = otTables.avar() if not hasattr(self.table, "Reserved"): self.table.Reserved = 0 self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr( self, "minorVersion", 0 ) self.table.AxisCount = len(axisTags) self.table.AxisSegmentMap = [] for axis in axisTags: mappings = self.segments[axis] segmentMap = otTables.AxisSegmentMap() segmentMap.PositionMapCount = len(mappings) segmentMap.AxisValueMap = [] for key, value in sorted(mappings.items()): valueMap = otTables.AxisValueMap() valueMap.FromCoordinate = key valueMap.ToCoordinate = value segmentMap.AxisValueMap.append(valueMap) self.table.AxisSegmentMap.append(segmentMap) return super().compile(ttFont) def decompile(self, data, ttFont): super().decompile(data, ttFont) assert self.table.Version >= 0x00010000 self.majorVersion = self.table.Version >> 16 self.minorVersion = self.table.Version & 0xFFFF axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] for axis in axisTags: self.segments[axis] = {} for axis, segmentMap in zip(axisTags, self.table.AxisSegmentMap): segments = self.segments[axis] = {} for segment in segmentMap.AxisValueMap: segments[segment.FromCoordinate] = segment.ToCoordinate def toXML(self, writer, ttFont): writer.simpletag( "version", major=getattr(self, "majorVersion", 1), minor=getattr(self, "minorVersion", 0), ) writer.newline() axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] for axis in axisTags: writer.begintag("segment", axis=axis) writer.newline() for key, value in sorted(self.segments[axis].items()): key = fl2str(key, 14) value = fl2str(value, 14) writer.simpletag("mapping", **{"from": key, "to": value}) writer.newline() writer.endtag("segment") writer.newline() if getattr(self, "majorVersion", 1) >= 2: if self.table.VarIdxMap: self.table.VarIdxMap.toXML(writer, ttFont, name="VarIdxMap") if self.table.VarStore: self.table.VarStore.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "table"): self.table = otTables.avar() if not hasattr(self.table, "Reserved"): self.table.Reserved = 0 if name == "version": self.majorVersion = safeEval(attrs["major"]) self.minorVersion = safeEval(attrs["minor"]) self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr( self, "minorVersion", 0 ) elif name == "segment": axis = attrs["axis"] segment = self.segments[axis] = {} for element in content: if isinstance(element, tuple): elementName, elementAttrs, _ = element if elementName == "mapping": fromValue = str2fl(elementAttrs["from"], 14) toValue = str2fl(elementAttrs["to"], 14) if fromValue in segment: log.warning( "duplicate entry for %s in axis '%s'", fromValue, axis ) segment[fromValue] = toValue else: super().fromXML(name, attrs, content, ttFont) PKaZZZ.xb���"fontTools/ttLib/tables/_b_s_l_n.pyfrom .otBase import BaseTTXConverter # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html class table__b_s_l_n(BaseTTXConverter): pass PKaZZZ�&"fontTools/ttLib/tables/_c_i_d_g.py# coding: utf-8 from .otBase import BaseTTXConverter class table__c_i_d_g(BaseTTXConverter): """The AAT ``cidg`` table has almost the same structure as ``gidc``, just mapping CIDs to GlyphIDs instead of the reverse direction. 
It is useful for fonts that may be used by a PDF renderer in lieu of a font reference with a known glyph collection but no subsetted glyphs. For instance, a PDF can say “please use a font conforming to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is, say, a TrueType font. ``gidc`` is lossy for this purpose and is obsoleted by ``cidg``. For example, the first font in ``/System/Library/Fonts/PingFang.ttc`` (which Apple ships pre-installed on macOS 10.12.6) has a ``cidg`` table.""" pass PKaZZZ؏�����"fontTools/ttLib/tables/_c_m_a_p.pyfrom fontTools.misc.textTools import bytesjoin, safeEval, readHex from fontTools.misc.encodingTools import getEncoding from fontTools.ttLib import getSearchRange from fontTools.unicode import Unicode from . import DefaultTable import sys import struct import array import logging log = logging.getLogger(__name__) def _make_map(font, chars, gids): assert len(chars) == len(gids) glyphNames = font.getGlyphNameMany(gids) cmap = {} for char, gid, name in zip(chars, gids, glyphNames): if gid == 0: continue cmap[char] = name return cmap class table__c_m_a_p(DefaultTable.DefaultTable): """Character to Glyph Index Mapping Table This class represents the `cmap <https://docs.microsoft.com/en-us/typography/opentype/spec/cmap>`_ table, which maps between input characters (in Unicode or other system encodings) and glyphs within the font. The ``cmap`` table contains one or more subtables which determine the mapping of characters to glyphs across different platforms and encoding systems. ``table__c_m_a_p`` objects expose an accessor ``.tables`` which provides access to the subtables, although it is normally easier to retrieve individual subtables through the utility methods described below. To add new subtables to a font, first determine the subtable format (if in doubt use format 4 for glyphs within the BMP, format 12 for glyphs outside the BMP, and format 14 for Unicode Variation Sequences), construct subtable objects with ``CmapSubtable.newSubtable(format)``, and append them to the ``.tables`` list. Within a subtable, the mapping of characters to glyphs is provided by the ``.cmap`` attribute. Example:: cmap4_0_3 = CmapSubtable.newSubtable(4) cmap4_0_3.platformID = 0 cmap4_0_3.platEncID = 3 cmap4_0_3.language = 0 cmap4_0_3.cmap = { 0xC1: "Aacute" } cmap = newTable("cmap") cmap.tableVersion = 0 cmap.tables = [cmap4_0_3] """ def getcmap(self, platformID, platEncID): """Returns the first subtable which matches the given platform and encoding. Args: platformID (int): The platform ID. Use 0 for Unicode, 1 for Macintosh (deprecated for new fonts), 2 for ISO (deprecated) and 3 for Windows. platEncID (int): Encoding ID. Interpretation depends on the platform ID. See the OpenType specification for details. Returns: An object which is a subclass of :py:class:`CmapSubtable` if a matching subtable is found within the font, or ``None`` otherwise. """ for subtable in self.tables: if subtable.platformID == platformID and subtable.platEncID == platEncID: return subtable return None # not found def getBestCmap( self, cmapPreferences=( (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0), ), ): """Returns the 'best' Unicode cmap dictionary available in the font or ``None`` if no Unicode cmap subtable is available.
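For example, assuming ``font`` is a loaded ``TTFont`` instance (a minimal usage sketch)::

    best = font["cmap"].getBestCmap()
    glyphName = best.get(0x41)  # the glyph mapped to U+0041, or None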
By default it will search for the following (platformID, platEncID) pairs in order:: (3, 10), # Windows Unicode full repertoire (0, 6), # Unicode full repertoire (format 13 subtable) (0, 4), # Unicode 2.0 full repertoire (3, 1), # Windows Unicode BMP (0, 3), # Unicode 2.0 BMP (0, 2), # Unicode ISO/IEC 10646 (0, 1), # Unicode 1.1 (0, 0) # Unicode 1.0 This particular order matches what HarfBuzz uses to choose what subtable to use by default. This order prefers the largest-repertoire subtable, and among those, prefers the Windows-platform over the Unicode-platform as the former has wider support. This order can be customized via the ``cmapPreferences`` argument. """ for platformID, platEncID in cmapPreferences: cmapSubtable = self.getcmap(platformID, platEncID) if cmapSubtable is not None: return cmapSubtable.cmap return None # None of the requested cmap subtables were found def buildReversed(self): """Builds a reverse mapping dictionary Iterates over all Unicode cmap tables and returns a dictionary mapping glyphs to sets of codepoints, such as:: { 'one': {0x31} 'A': {0x41,0x391} } The values are sets of Unicode codepoints because some fonts map different codepoints to the same glyph. For example, ``U+0041 LATIN CAPITAL LETTER A`` and ``U+0391 GREEK CAPITAL LETTER ALPHA`` are sometimes the same glyph. """ result = {} for subtable in self.tables: if subtable.isUnicode(): for codepoint, name in subtable.cmap.items(): result.setdefault(name, set()).add(codepoint) return result def decompile(self, data, ttFont): tableVersion, numSubTables = struct.unpack(">HH", data[:4]) self.tableVersion = int(tableVersion) self.tables = tables = [] seenOffsets = {} for i in range(numSubTables): platformID, platEncID, offset = struct.unpack( ">HHl", data[4 + i * 8 : 4 + (i + 1) * 8] ) platformID, platEncID = int(platformID), int(platEncID) format, length = struct.unpack(">HH", data[offset : offset + 4]) if format in [8, 10, 12, 13]: format, reserved, length = struct.unpack( ">HHL", data[offset : offset + 8] ) elif format in [14]: format, length = struct.unpack(">HL", data[offset : offset + 6]) if not length: log.error( "cmap subtable is reported as having zero length: platformID %s, " "platEncID %s, format %s offset %s. Skipping table.", platformID, platEncID, format, offset, ) continue table = CmapSubtable.newSubtable(format) table.platformID = platformID table.platEncID = platEncID # Note that by default we decompile only the subtable header info; # any other data gets decompiled only when an attribute of the # subtable is referenced. table.decompileHeader(data[offset : offset + int(length)], ttFont) if offset in seenOffsets: table.data = None # Mark as decompiled table.cmap = tables[seenOffsets[offset]].cmap else: seenOffsets[offset] = i tables.append(table) if ttFont.lazy is False: # Be lazy for None and True self.ensureDecompiled() def ensureDecompiled(self, recurse=False): # The recurse argument is unused, but part of the signature of # ensureDecompiled across the library. for st in self.tables: st.ensureDecompiled() def compile(self, ttFont): self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__() numSubTables = len(self.tables) totalOffset = 4 + 8 * numSubTables data = struct.pack(">HH", self.tableVersion, numSubTables) tableData = b"" seen = ( {} ) # Some tables are the same object reference. Don't compile them twice. 
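# A minimal sketch of the two-level dedup below: `seen` keys on
# id(table.cmap), so subtables sharing one cmap dict reuse the offset of the
# first-compiled chunk; `done` keys on the compiled bytes, so distinct dicts
# that compile to identical data also share a single chunk. Only one copy is
# written, and every matching subtable record points at its offset.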
done = ( {} ) # Some tables are different objects, but compile to the same data chunk for table in self.tables: offset = seen.get(id(table.cmap)) if offset is None: chunk = table.compile(ttFont) offset = done.get(chunk) if offset is None: offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len( tableData ) tableData = tableData + chunk data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset) return data + tableData def toXML(self, writer, ttFont): writer.simpletag("tableVersion", version=self.tableVersion) writer.newline() for table in self.tables: table.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if name == "tableVersion": self.tableVersion = safeEval(attrs["version"]) return if name[:12] != "cmap_format_": return if not hasattr(self, "tables"): self.tables = [] format = safeEval(name[12:]) table = CmapSubtable.newSubtable(format) table.platformID = safeEval(attrs["platformID"]) table.platEncID = safeEval(attrs["platEncID"]) table.fromXML(name, attrs, content, ttFont) self.tables.append(table) class CmapSubtable(object): """Base class for all cmap subtable formats. Subclasses which handle the individual subtable formats are named ``cmap_format_0``, ``cmap_format_2`` etc. Use :py:meth:`getSubtableClass` to retrieve the concrete subclass, or :py:meth:`newSubtable` to get a new subtable object for a given format. The object exposes a ``.cmap`` attribute, which contains a dictionary mapping character codepoints to glyph names. """ @staticmethod def getSubtableClass(format): """Return the subtable class for a format.""" return cmap_classes.get(format, cmap_format_unknown) @staticmethod def newSubtable(format): """Return a new instance of a subtable for the given format .""" subtableClass = CmapSubtable.getSubtableClass(format) return subtableClass(format) def __init__(self, format): self.format = format self.data = None self.ttFont = None self.platformID = None #: The platform ID of this subtable self.platEncID = None #: The encoding ID of this subtable (interpretation depends on ``platformID``) self.language = ( None #: The language ID of this subtable (Macintosh platform only) ) def ensureDecompiled(self, recurse=False): # The recurse argument is unused, but part of the signature of # ensureDecompiled across the library. if self.data is None: return self.decompile(None, None) # use saved data. self.data = None # Once this table has been decompiled, make sure we don't # just return the original data. Also avoids recursion when # called with an attribute that the cmap subtable doesn't have. def __getattr__(self, attr): # allow lazy decompilation of subtables. 
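# Lazy decompilation in practice: decompile()/decompileHeader() stash the raw
# bytes in self.data, and the first access to a not-yet-present attribute
# (typically .cmap) lands in __getattr__ below, which calls ensureDecompiled()
# and then retries the attribute lookup. A minimal usage sketch:
#
#     subtable = font["cmap"].tables[0]  # header parsed, mapping still raw
#     subtable.cmap                      # triggers ensureDecompiled() here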
if attr[:2] == "__": # don't handle requests for member functions like '__lt__' raise AttributeError(attr) if self.data is None: raise AttributeError(attr) self.ensureDecompiled() return getattr(self, attr) def decompileHeader(self, data, ttFont): format, length, language = struct.unpack(">HHH", data[:6]) assert ( len(data) == length ), "corrupt cmap table format %d (data length: %d, header length: %d)" % ( format, len(data), length, ) self.format = int(format) self.length = int(length) self.language = int(language) self.data = data[6:] self.ttFont = ttFont def toXML(self, writer, ttFont): writer.begintag( self.__class__.__name__, [ ("platformID", self.platformID), ("platEncID", self.platEncID), ("language", self.language), ], ) writer.newline() codes = sorted(self.cmap.items()) self._writeCodes(codes, writer) writer.endtag(self.__class__.__name__) writer.newline() def getEncoding(self, default=None): """Returns the Python encoding name for this cmap subtable based on its platformID, platEncID, and language. If encoding for these values is not known, by default ``None`` is returned. That can be overridden by passing a value to the ``default`` argument. Note that if you want to choose a "preferred" cmap subtable, most of the time ``self.isUnicode()`` is what you want as that one only returns true for the modern, commonly used, Unicode-compatible triplets, not the legacy ones. """ return getEncoding(self.platformID, self.platEncID, self.language, default) def isUnicode(self): """Returns true if the characters are interpreted as Unicode codepoints.""" return self.platformID == 0 or ( self.platformID == 3 and self.platEncID in [0, 1, 10] ) def isSymbol(self): """Returns true if the subtable is for the Symbol encoding (3,0)""" return self.platformID == 3 and self.platEncID == 0 def _writeCodes(self, codes, writer): isUnicode = self.isUnicode() for code, name in codes: writer.simpletag("map", code=hex(code), name=name) if isUnicode: writer.comment(Unicode[code]) writer.newline() def __lt__(self, other): if not isinstance(other, CmapSubtable): return NotImplemented # implemented so that list.sort() sorts according to the spec. selfTuple = ( getattr(self, "platformID", None), getattr(self, "platEncID", None), getattr(self, "language", None), self.__dict__, ) otherTuple = ( getattr(other, "platformID", None), getattr(other, "platEncID", None), getattr(other, "language", None), other.__dict__, ) return selfTuple < otherTuple class cmap_format_0(CmapSubtable): def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. 
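# For reference, the format 0 layout handled below: a six-byte header packed
# as ">HHH" (format, length, language) followed by exactly 256 one-byte glyph
# IDs, one per character code 0..255 -- hence the 262-byte assertion.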
if data is not None and ttFont is not None: self.decompileHeader(data, ttFont) else: assert ( data is None and ttFont is None ), "Need both data and ttFont arguments" data = ( self.data ) # decompileHeader assigns the data after the header to self.data assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" gids = array.array("B") gids.frombytes(self.data) charCodes = list(range(len(gids))) self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: return struct.pack(">HHH", 0, 262, self.language) + self.data cmap = self.cmap assert set(cmap.keys()).issubset(range(256)) getGlyphID = ttFont.getGlyphID valueList = [getGlyphID(cmap[i]) if i in cmap else 0 for i in range(256)] gids = array.array("B", valueList) data = struct.pack(">HHH", 0, 262, self.language) + gids.tobytes() assert len(data) == 262 return data def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] subHeaderFormat = ">HHhH" class SubHeader(object): def __init__(self): self.firstCode = None self.entryCount = None self.idDelta = None self.idRangeOffset = None self.glyphIndexArray = [] class cmap_format_2(CmapSubtable): def setIDDelta(self, subHeader): subHeader.idDelta = 0 # find the minGI which is not zero. minGI = subHeader.glyphIndexArray[0] for gid in subHeader.glyphIndexArray: if (gid != 0) and (gid < minGI): minGI = gid # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K. # We would like to pick an idDelta such that the first glyphArray GID is 1, # so that we are more likely to be able to combine glypharray GID subranges. # This means that we have a problem when minGI is > 32K # Since the final gi is reconstructed from the glyphArray GID by: # (short)finalGID = (gid + idDelta) % 0x10000), # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the # negative number to an unsigned short. if minGI > 1: if minGI > 0x7FFF: subHeader.idDelta = -(0x10000 - minGI) - 1 else: subHeader.idDelta = minGI - 1 idDelta = subHeader.idDelta for i in range(subHeader.entryCount): gid = subHeader.glyphIndexArray[i] if gid > 0: subHeader.glyphIndexArray[i] = gid - idDelta def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. if data is not None and ttFont is not None: self.decompileHeader(data, ttFont) else: assert ( data is None and ttFont is None ), "Need both data and ttFont arguments" data = ( self.data ) # decompileHeader assigns the data after the header to self.data subHeaderKeys = [] maxSubHeaderindex = 0 # get the key array, and determine the number of subHeaders. 
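# For reference: the key array decoded below is 256 uint16 values (512
# bytes). Each value is a byte offset into the subheader array, and one
# subheader record (">HHhH") is 8 bytes, so dividing by 8 recovers the
# subheader index; e.g. a key of 24 selects subheader 3.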
allKeys = array.array("H") allKeys.frombytes(data[:512]) data = data[512:] if sys.byteorder != "big": allKeys.byteswap() subHeaderKeys = [key // 8 for key in allKeys] maxSubHeaderindex = max(subHeaderKeys) # Load subHeaders subHeaderList = [] pos = 0 for i in range(maxSubHeaderindex + 1): subHeader = SubHeader() ( subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, subHeader.idRangeOffset, ) = struct.unpack(subHeaderFormat, data[pos : pos + 8]) pos += 8 giDataPos = pos + subHeader.idRangeOffset - 2 giList = array.array("H") giList.frombytes(data[giDataPos : giDataPos + subHeader.entryCount * 2]) if sys.byteorder != "big": giList.byteswap() subHeader.glyphIndexArray = giList subHeaderList.append(subHeader) # How this gets processed. # Charcodes may be one or two bytes. # The first byte of a charcode is mapped through the subHeaderKeys, to select # a subHeader. For any subheader but 0, the next byte is then mapped through the # selected subheader. If subheader Index 0 is selected, then the byte itself is # mapped through the subheader, and there is no second byte. # Then assume that the subsequent byte is the first byte of the next charcode, and repeat. # # Each subheader references a range in the glyphIndexArray whose length is entryCount. # The range in glyphIndexArray referenced by a subheader may overlap with the range in glyphIndexArray # referenced by another subheader. # The only subheader that will be referenced by more than one first-byte value is the subheader # that maps the entire range of glyphID values to glyphIndex 0, e.g. notdef: # {firstChar 0, EntryCount 0, idDelta 0, idRangeOffset xx} # A byte being mapped through a subheader is treated as an index into a mapping of array index to font glyphIndex. # A subheader specifies a subrange within (0...256) by the # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero # (e.g. glyph not in font). # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar). # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex. # Example for Logocut-Medium # first byte of charcode = 129; selects subheader 1. # subheader 1 = {firstChar 64, EntryCount 108, idDelta 42, idRangeOffset 0252} # second byte of charCode = 66 # the index offset = 66-64 = 2. # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is: # [glyphIndexArray index], [subrange array index] = glyphIndex # [256], [0]=1 from charcode [129, 64] # [257], [1]=2 from charcode [129, 65] # [258], [2]=3 from charcode [129, 66] # [259], [3]=4 from charcode [129, 67] # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, # add it to the glyphID to get the final glyphIndex # value. In this case the final glyph index = 3 + 42 -> 45 for the final glyphIndex. Whew! self.data = b"" cmap = {} notdefGI = 0 for firstByte in range(256): subHeadindex = subHeaderKeys[firstByte] subHeader = subHeaderList[subHeadindex] if subHeadindex == 0: if (firstByte < subHeader.firstCode) or ( firstByte >= subHeader.firstCode + subHeader.entryCount ): continue # gi is notdef.
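# A condensed sketch of the lookup steps described above, as a hypothetical
# helper (mirroring the loop below; not part of this module):
#
#     def lookup(firstByte, secondByte=None):
#         sub = subHeaderList[subHeaderKeys[firstByte]]
#         byte = firstByte if secondByte is None else secondByte
#         if not (sub.firstCode <= byte < sub.firstCode + sub.entryCount):
#             return 0  # .notdef
#         gi = sub.glyphIndexArray[byte - sub.firstCode]
#         return (gi + sub.idDelta) % 0x10000 if gi else 0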
else: charCode = firstByte offsetIndex = firstByte - subHeader.firstCode gi = subHeader.glyphIndexArray[offsetIndex] if gi != 0: gi = (gi + subHeader.idDelta) % 0x10000 else: continue # gi is notdef. cmap[charCode] = gi else: if subHeader.entryCount: charCodeOffset = firstByte * 256 + subHeader.firstCode for offsetIndex in range(subHeader.entryCount): charCode = charCodeOffset + offsetIndex gi = subHeader.glyphIndexArray[offsetIndex] if gi != 0: gi = (gi + subHeader.idDelta) % 0x10000 else: continue cmap[charCode] = gi # If not subHeader.entryCount, then all char codes with this first byte are # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the # same as mapping it to .notdef. gids = list(cmap.values()) charCodes = list(cmap.keys()) self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: return ( struct.pack(">HHH", self.format, self.length, self.language) + self.data ) kEmptyTwoCharCodeRange = -1 notdefGI = 0 items = sorted(self.cmap.items()) charCodes = [item[0] for item in items] names = [item[1] for item in items] nameMap = ttFont.getReverseGlyphMap() try: gids = [nameMap[name] for name in names] except KeyError: nameMap = ttFont.getReverseGlyphMap(rebuild=True) try: gids = [nameMap[name] for name in names] except KeyError: # allow virtual GIDs in format 2 tables gids = [] for name in names: try: gid = nameMap[name] except KeyError: try: if name[:3] == "gid": gid = int(name[3:]) else: gid = ttFont.getGlyphID(name) except: raise KeyError(name) gids.append(gid) # Process the (char code to gid) item list in char code order. # By definition, all one byte char codes map to subheader 0. # For all the two byte char codes, we assume that the first byte maps to the empty subhead (with an entry count of 0, # which defines all char codes in its range to map to notdef) unless proven otherwise. # Note that since the char code items are processed in char code order, all the char codes with the # same first byte are in sequential order. subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256) ] # list of indices into subHeaderList. subHeaderList = [] # We force this subheader entry 0 to exist in the subHeaderList in the case where someone comes up # with a cmap where all the one byte char codes map to notdef, # with the result that the subhead 0 would not get created just by processing the item list. charCode = charCodes[0] if charCode > 255: subHeader = SubHeader() subHeader.firstCode = 0 subHeader.entryCount = 0 subHeader.idDelta = 0 subHeader.idRangeOffset = 0 subHeaderList.append(subHeader) lastFirstByte = -1 items = zip(charCodes, gids) for charCode, gid in items: if gid == 0: continue firstbyte = charCode >> 8 secondByte = charCode & 0x00FF if ( firstbyte != lastFirstByte ): # Need to update the current subhead, and start a new one. if lastFirstByte > -1: # fix GI's and idDelta of current subheader. self.setIDDelta(subHeader) # If it was subheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero # for the indices matching the char codes. if lastFirstByte == 0: for index in range(subHeader.entryCount): charCode = subHeader.firstCode + index subHeaderKeys[charCode] = 0 assert subHeader.entryCount == len( subHeader.glyphIndexArray ), "Error - subhead entry count does not match len of glyphID subrange."
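# Worked example of the setIDDelta() fixup applied above: if a subheader's
# lowest nonzero GID is 5, idDelta becomes 4 and each stored entry is lowered
# by 4, so the glyphIndexArray starts at 1 and the decompiler recovers 5 via
# (1 + 4) % 0x10000.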
# init new subheader subHeader = SubHeader() subHeader.firstCode = secondByte subHeader.entryCount = 1 subHeader.glyphIndexArray.append(gid) subHeaderList.append(subHeader) subHeaderKeys[firstbyte] = len(subHeaderList) - 1 lastFirstByte = firstbyte else: # need to fill in with notdefs all the code points between the last charCode and the current charCode. codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount) for i in range(codeDiff): subHeader.glyphIndexArray.append(notdefGI) subHeader.glyphIndexArray.append(gid) subHeader.entryCount = subHeader.entryCount + codeDiff + 1 # fix GI's and idDelta of the last subheader that we added to the subheader array. self.setIDDelta(subHeader) # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges. subHeader = SubHeader() subHeader.firstCode = 0 subHeader.entryCount = 0 subHeader.idDelta = 0 subHeader.idRangeOffset = 2 subHeaderList.append(subHeader) emptySubheadIndex = len(subHeaderList) - 1 for index in range(256): if subHeaderKeys[index] == kEmptyTwoCharCodeRange: subHeaderKeys[index] = emptySubheadIndex # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray, # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with # charcode 0 and GID 0. idRangeOffset = ( len(subHeaderList) - 1 ) * 8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset. subheadRangeLen = ( len(subHeaderList) - 1 ) # skip last special empty-set subheader; we've already hardcoded its idRangeOffset to 2. for index in range(subheadRangeLen): subHeader = subHeaderList[index] subHeader.idRangeOffset = 0 for j in range(index): prevSubhead = subHeaderList[j] if ( prevSubhead.glyphIndexArray == subHeader.glyphIndexArray ): # use the glyphIndexArray subarray subHeader.idRangeOffset = ( prevSubhead.idRangeOffset - (index - j) * 8 ) subHeader.glyphIndexArray = [] break if subHeader.idRangeOffset == 0: # didn't find one. subHeader.idRangeOffset = idRangeOffset idRangeOffset = ( idRangeOffset - 8 ) + subHeader.entryCount * 2 # one less subheader, one more subArray. else: idRangeOffset = idRangeOffset - 8 # one less subheader # Now we can write out the data! length = ( 6 + 512 + 8 * len(subHeaderList) ) # header, 256 subHeaderKeys, and subheader array. for subhead in subHeaderList[:-1]: length = ( length + len(subhead.glyphIndexArray) * 2 ) # We can't use subhead.entryCount, as some of the subheads may share subArrays. dataList = [struct.pack(">HHH", 2, length, self.language)] for index in subHeaderKeys: dataList.append(struct.pack(">H", index * 8)) for subhead in subHeaderList: dataList.append( struct.pack( subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset, ) ) for subhead in subHeaderList[:-1]: for gi in subhead.glyphIndexArray: dataList.append(struct.pack(">H", gi)) data = bytesjoin(dataList) assert len(data) == length, ( "Error: cmap format 2 is not same length as calculated!
actual: " + str(len(data)) + " calc : " + str(length) ) return data def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] cmap_format_4_format = ">7H" # uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF. # uint16 reservedPad # This value should be zero # uint16 startCode[segCount] # Starting character code for each segment # uint16 idDelta[segCount] # Delta for all character codes in segment # uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0 # uint16 glyphIndexArray[variable] # Glyph index array def splitRange(startCode, endCode, cmap): # Try to split a range of character codes into subranges with consecutive # glyph IDs in such a way that the cmap4 subtable can be stored "most" # efficiently. I can't prove I've got the optimal solution, but it seems # to do well with the fonts I tested: none became bigger, many became smaller. if startCode == endCode: return [], [endCode] lastID = cmap[startCode] lastCode = startCode inOrder = None orderedBegin = None subRanges = [] # Gather subranges in which the glyph IDs are consecutive. for code in range(startCode + 1, endCode + 1): glyphID = cmap[code] if glyphID - 1 == lastID: if inOrder is None or not inOrder: inOrder = 1 orderedBegin = lastCode else: if inOrder: inOrder = 0 subRanges.append((orderedBegin, lastCode)) orderedBegin = None lastID = glyphID lastCode = code if inOrder: subRanges.append((orderedBegin, lastCode)) assert lastCode == endCode # Now filter out those new subranges that would only make the data bigger. # A new segment cost 8 bytes, not using a new segment costs 2 bytes per # character. newRanges = [] for b, e in subRanges: if b == startCode and e == endCode: break # the whole range, we're fine if b == startCode or e == endCode: threshold = 4 # split costs one more segment else: threshold = 8 # split costs two more segments if (e - b + 1) > threshold: newRanges.append((b, e)) subRanges = newRanges if not subRanges: return [], [endCode] if subRanges[0][0] != startCode: subRanges.insert(0, (startCode, subRanges[0][0] - 1)) if subRanges[-1][1] != endCode: subRanges.append((subRanges[-1][1] + 1, endCode)) # Fill the "holes" in the segments list -- those are the segments in which # the glyph IDs are _not_ consecutive. i = 1 while i < len(subRanges): if subRanges[i - 1][1] + 1 != subRanges[i][0]: subRanges.insert(i, (subRanges[i - 1][1] + 1, subRanges[i][0] - 1)) i = i + 1 i = i + 1 # Transform the ranges into startCode/endCode lists. start = [] end = [] for b, e in subRanges: start.append(b) end.append(e) start.pop(0) assert len(start) + 1 == len(end) return start, end class cmap_format_4(CmapSubtable): def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. 
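# Doctest-style illustration of splitRange() above, using a made-up cmap:
# codes 0..9 map to the consecutive GIDs 100..109, then code 10 jumps to 500.
#
#     >>> cmap = {c: 100 + c for c in range(10)}
#     >>> cmap[10] = 500
#     >>> splitRange(0, 10, cmap)
#     ([10], [9, 10])
#
# That is, two segments: 0..9 (consecutive GIDs, encodable with idDelta
# alone) and the lone code 10.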
if data is not None and ttFont is not None: self.decompileHeader(data, ttFont) else: assert ( data is None and ttFont is None ), "Need both data and ttFont arguments" data = ( self.data ) # decompileHeader assigns the data after the header to self.data (segCountX2, searchRange, entrySelector, rangeShift) = struct.unpack( ">4H", data[:8] ) data = data[8:] segCount = segCountX2 // 2 allCodes = array.array("H") allCodes.frombytes(data) self.data = data = None if sys.byteorder != "big": allCodes.byteswap() # divide the data endCode = allCodes[:segCount] allCodes = allCodes[segCount + 1 :] # the +1 is skipping the reservedPad field startCode = allCodes[:segCount] allCodes = allCodes[segCount:] idDelta = allCodes[:segCount] allCodes = allCodes[segCount:] idRangeOffset = allCodes[:segCount] glyphIndexArray = allCodes[segCount:] lenGIArray = len(glyphIndexArray) # build 2-byte character mapping charCodes = [] gids = [] for i in range(len(startCode) - 1): # don't do 0xffff! start = startCode[i] delta = idDelta[i] rangeOffset = idRangeOffset[i] partial = rangeOffset // 2 - start + i - len(idRangeOffset) rangeCharCodes = list(range(startCode[i], endCode[i] + 1)) charCodes.extend(rangeCharCodes) if rangeOffset == 0: gids.extend( [(charCode + delta) & 0xFFFF for charCode in rangeCharCodes] ) else: for charCode in rangeCharCodes: index = charCode + partial assert index < lenGIArray, ( "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray) ) if glyphIndexArray[index] != 0: # if not missing glyph glyphID = glyphIndexArray[index] + delta else: glyphID = 0 # missing glyph gids.append(glyphID & 0xFFFF) self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: return ( struct.pack(">HHH", self.format, self.length, self.language) + self.data ) charCodes = list(self.cmap.keys()) if not charCodes: startCode = [0xFFFF] endCode = [0xFFFF] else: charCodes.sort() names = [self.cmap[code] for code in charCodes] nameMap = ttFont.getReverseGlyphMap() try: gids = [nameMap[name] for name in names] except KeyError: nameMap = ttFont.getReverseGlyphMap(rebuild=True) try: gids = [nameMap[name] for name in names] except KeyError: # allow virtual GIDs in format 4 tables gids = [] for name in names: try: gid = nameMap[name] except KeyError: try: if name[:3] == "gid": gid = int(name[3:]) else: gid = ttFont.getGlyphID(name) except: raise KeyError(name) gids.append(gid) cmap = {} # code:glyphID mapping for code, gid in zip(charCodes, gids): cmap[code] = gid # Build startCode and endCode lists. # Split the char codes in ranges of consecutive char codes, then split # each range in more ranges of consecutive/not consecutive glyph IDs. # See splitRange(). 
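# Sketch of the two-level split performed below: consecutive char codes are
# first grouped into raw ranges, then splitRange() carves each range into
# sub-runs whose glyph IDs are also consecutive, since those can be encoded
# with idDelta alone and need no glyphIndexArray entries.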
lastCode = charCodes[0] endCode = [] startCode = [lastCode] for charCode in charCodes[ 1: ]: # skip the first code, it's the first start code if charCode == lastCode + 1: lastCode = charCode continue start, end = splitRange(startCode[-1], lastCode, cmap) startCode.extend(start) endCode.extend(end) startCode.append(charCode) lastCode = charCode start, end = splitRange(startCode[-1], lastCode, cmap) startCode.extend(start) endCode.extend(end) startCode.append(0xFFFF) endCode.append(0xFFFF) # build up rest of cruft idDelta = [] idRangeOffset = [] glyphIndexArray = [] for i in range(len(endCode) - 1): # skip the closing codes (0xffff) indices = [] for charCode in range(startCode[i], endCode[i] + 1): indices.append(cmap[charCode]) if indices == list(range(indices[0], indices[0] + len(indices))): idDelta.append((indices[0] - startCode[i]) % 0x10000) idRangeOffset.append(0) else: idDelta.append(0) idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i)) glyphIndexArray.extend(indices) idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef idRangeOffset.append(0) # Insane. segCount = len(endCode) segCountX2 = segCount * 2 searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2) charCodeArray = array.array("H", endCode + [0] + startCode) idDeltaArray = array.array("H", idDelta) restArray = array.array("H", idRangeOffset + glyphIndexArray) if sys.byteorder != "big": charCodeArray.byteswap() if sys.byteorder != "big": idDeltaArray.byteswap() if sys.byteorder != "big": restArray.byteswap() data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes() length = struct.calcsize(cmap_format_4_format) + len(data) header = struct.pack( cmap_format_4_format, self.format, length, self.language, segCountX2, searchRange, entrySelector, rangeShift, ) return header + data def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: if not isinstance(element, tuple): continue nameMap, attrsMap, dummyContent = element if nameMap != "map": assert 0, "Unrecognized keyword in cmap subtable" cmap[safeEval(attrsMap["code"])] = attrsMap["name"] class cmap_format_6(CmapSubtable): def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. if data is not None and ttFont is not None: self.decompileHeader(data, ttFont) else: assert ( data is None and ttFont is None ), "Need both data and ttFont arguments" data = ( self.data ) # decompileHeader assigns the data after the header to self.data firstCode, entryCount = struct.unpack(">HH", data[:4]) firstCode = int(firstCode) data = data[4:] # assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!! gids = array.array("H") gids.frombytes(data[: 2 * int(entryCount)]) if sys.byteorder != "big": gids.byteswap() self.data = data = None charCodes = list(range(firstCode, firstCode + len(gids))) self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: return ( struct.pack(">HHH", self.format, self.length, self.language) + self.data ) cmap = self.cmap codes = sorted(cmap.keys()) if codes: # yes, there are empty cmap tables. 
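# For reference: format 6 stores a single dense run -- firstCode plus a flat
# GID array covering every code from the lowest to the highest. The range is
# filled out below, and any gap becomes GID 0 (.notdef); e.g. a cmap with
# only 0x20 and 0x24 yields a five-entry array with zeros at 0x21..0x23.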
codes = list(range(codes[0], codes[-1] + 1)) firstCode = codes[0] valueList = [ ttFont.getGlyphID(cmap[code]) if code in cmap else 0 for code in codes ] gids = array.array("H", valueList) if sys.byteorder != "big": gids.byteswap() data = gids.tobytes() else: data = b"" firstCode = 0 header = struct.pack( ">HHHHH", 6, len(data) + 10, self.language, firstCode, len(codes) ) return header + data def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] class cmap_format_12_or_13(CmapSubtable): def __init__(self, format): self.format = format self.reserved = 0 self.data = None self.ttFont = None def decompileHeader(self, data, ttFont): format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16]) assert ( len(data) == (16 + nGroups * 12) == (length) ), "corrupt cmap table format %d (data length: %d, header length: %d)" % ( self.format, len(data), length, ) self.format = format self.reserved = reserved self.length = length self.language = language self.nGroups = nGroups self.data = data[16:] self.ttFont = ttFont def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. if data is not None and ttFont is not None: self.decompileHeader(data, ttFont) else: assert ( data is None and ttFont is None ), "Need both data and ttFont arguments" data = ( self.data ) # decompileHeader assigns the data after the header to self.data charCodes = [] gids = [] pos = 0 for i in range(self.nGroups): startCharCode, endCharCode, glyphID = struct.unpack( ">LLL", data[pos : pos + 12] ) pos += 12 lenGroup = 1 + endCharCode - startCharCode charCodes.extend(list(range(startCharCode, endCharCode + 1))) gids.extend(self._computeGIDs(glyphID, lenGroup)) self.data = data = None self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: return ( struct.pack( ">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups, ) + self.data ) charCodes = list(self.cmap.keys()) names = list(self.cmap.values()) nameMap = ttFont.getReverseGlyphMap() try: gids = [nameMap[name] for name in names] except KeyError: nameMap = ttFont.getReverseGlyphMap(rebuild=True) try: gids = [nameMap[name] for name in names] except KeyError: # allow virtual GIDs in format 12 tables gids = [] for name in names: try: gid = nameMap[name] except KeyError: try: if name[:3] == "gid": gid = int(name[3:]) else: gid = ttFont.getGlyphID(name) except: raise KeyError(name) gids.append(gid) cmap = {} # code:glyphID mapping for code, gid in zip(charCodes, gids): cmap[code] = gid charCodes.sort() index = 0 startCharCode = charCodes[0] startGlyphID = cmap[startCharCode] lastGlyphID = startGlyphID - self._format_step lastCharCode = startCharCode - 1 nGroups = 0 dataList = [] maxIndex = len(charCodes) for index in range(maxIndex): charCode = charCodes[index] glyphID = cmap[charCode] if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode): dataList.append( struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID) ) startCharCode = charCode startGlyphID = glyphID nGroups = nGroups + 1 lastGlyphID = glyphID lastCharCode = charCode 
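# The run detection above differs per subclass: format 12 extends a group
# only while charCode and glyphID both advance by 1 (_format_step == 1),
# while format 13 maps a whole char range to one glyph and extends while the
# glyphID stays fixed (_format_step == 0). The final open run is flushed by
# the append just below.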
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) nGroups = nGroups + 1 data = bytesjoin(dataList) lengthSubtable = len(data) + 16 assert len(data) == (nGroups * 12) == (lengthSubtable - 16) return ( struct.pack( ">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups, ) + data ) def toXML(self, writer, ttFont): writer.begintag( self.__class__.__name__, [ ("platformID", self.platformID), ("platEncID", self.platEncID), ("format", self.format), ("reserved", self.reserved), ("length", self.length), ("language", self.language), ("nGroups", self.nGroups), ], ) writer.newline() codes = sorted(self.cmap.items()) self._writeCodes(codes, writer) writer.endtag(self.__class__.__name__) writer.newline() def fromXML(self, name, attrs, content, ttFont): self.format = safeEval(attrs["format"]) self.reserved = safeEval(attrs["reserved"]) self.length = safeEval(attrs["length"]) self.language = safeEval(attrs["language"]) self.nGroups = safeEval(attrs["nGroups"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] class cmap_format_12(cmap_format_12_or_13): _format_step = 1 def __init__(self, format=12): cmap_format_12_or_13.__init__(self, format) def _computeGIDs(self, startingGlyph, numberOfGlyphs): return list(range(startingGlyph, startingGlyph + numberOfGlyphs)) def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode) class cmap_format_13(cmap_format_12_or_13): _format_step = 0 def __init__(self, format=13): cmap_format_12_or_13.__init__(self, format) def _computeGIDs(self, startingGlyph, numberOfGlyphs): return [startingGlyph] * numberOfGlyphs def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode) def cvtToUVS(threeByteString): data = b"\0" + threeByteString (val,) = struct.unpack(">L", data) return val def cvtFromUVS(val): assert 0 <= val < 0x1000000 fourByteString = struct.pack(">L", val) return fourByteString[1:] class cmap_format_14(CmapSubtable): def decompileHeader(self, data, ttFont): format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10]) self.data = data[10:] self.length = length self.numVarSelectorRecords = numVarSelectorRecords self.ttFont = ttFont self.language = 0xFF # has no language. def decompile(self, data, ttFont): if data is not None and ttFont is not None: self.decompileHeader(data, ttFont) else: assert ( data is None and ttFont is None ), "Need both data and ttFont arguments" data = self.data self.cmap = ( {} ) # so that clients that expect this to exist in a cmap table won't fail. 
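# The 24-bit UVS helpers above, doctest-style (values chosen arbitrarily):
#
#     >>> hex(cvtToUVS(b"\x01\xf4\x8d"))
#     '0x1f48d'
#     >>> cvtFromUVS(0xFE00)
#     b'\x00\xfe\x00'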
uvsDict = {} recOffset = 0 for n in range(self.numVarSelectorRecords): uvs, defOVSOffset, nonDefUVSOffset = struct.unpack( ">3sLL", data[recOffset : recOffset + 11] ) recOffset += 11 varUVS = cvtToUVS(uvs) if defOVSOffset: startOffset = defOVSOffset - 10 (numValues,) = struct.unpack(">L", data[startOffset : startOffset + 4]) startOffset += 4 for r in range(numValues): uv, addtlCnt = struct.unpack( ">3sB", data[startOffset : startOffset + 4] ) startOffset += 4 firstBaseUV = cvtToUVS(uv) cnt = addtlCnt + 1 baseUVList = list(range(firstBaseUV, firstBaseUV + cnt)) glyphList = [None] * cnt localUVList = zip(baseUVList, glyphList) try: uvsDict[varUVS].extend(localUVList) except KeyError: uvsDict[varUVS] = list(localUVList) if nonDefUVSOffset: startOffset = nonDefUVSOffset - 10 (numRecs,) = struct.unpack(">L", data[startOffset : startOffset + 4]) startOffset += 4 localUVList = [] for r in range(numRecs): uv, gid = struct.unpack(">3sH", data[startOffset : startOffset + 5]) startOffset += 5 uv = cvtToUVS(uv) glyphName = self.ttFont.getGlyphName(gid) localUVList.append((uv, glyphName)) try: uvsDict[varUVS].extend(localUVList) except KeyError: uvsDict[varUVS] = localUVList self.uvsDict = uvsDict def toXML(self, writer, ttFont): writer.begintag( self.__class__.__name__, [ ("platformID", self.platformID), ("platEncID", self.platEncID), ], ) writer.newline() uvsDict = self.uvsDict uvsList = sorted(uvsDict.keys()) for uvs in uvsList: uvList = uvsDict[uvs] uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1])) for uv, gname in uvList: attrs = [("uv", hex(uv)), ("uvs", hex(uvs))] if gname is not None: attrs.append(("name", gname)) writer.simpletag("map", attrs) writer.newline() writer.endtag(self.__class__.__name__) writer.newline() def fromXML(self, name, attrs, content, ttFont): self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail if not hasattr(self, "cmap"): self.cmap = ( {} ) # so that clients that expect this to exist in a cmap table won't fail. if not hasattr(self, "uvsDict"): self.uvsDict = {} uvsDict = self.uvsDict # For backwards compatibility reasons we accept "None" as an indicator # for "default mapping", unless the font actually has a glyph named # "None". _hasGlyphNamedNone = None for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name != "map": continue uvs = safeEval(attrs["uvs"]) uv = safeEval(attrs["uv"]) gname = attrs.get("name") if gname == "None": if _hasGlyphNamedNone is None: _hasGlyphNamedNone = "None" in ttFont.getGlyphOrder() if not _hasGlyphNamedNone: gname = None try: uvsDict[uvs].append((uv, gname)) except KeyError: uvsDict[uvs] = [(uv, gname)] def compile(self, ttFont): if self.data: return ( struct.pack( ">HLL", self.format, self.length, self.numVarSelectorRecords ) + self.data ) uvsDict = self.uvsDict uvsList = sorted(uvsDict.keys()) self.numVarSelectorRecords = len(uvsList) offset = ( 10 + self.numVarSelectorRecords * 11 ) # current value is end of VarSelectorRecords block. 
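# Offset arithmetic for the value above: the format 14 header is 10 bytes
# (">HLL": format, length, numVarSelectorRecords) and each VarSelectorRecord
# is 11 bytes (">3sLL": a 24-bit variation selector plus two 32-bit offsets),
# so the default/non-default UVS tables written below start at
# 10 + numVarSelectorRecords * 11.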
data = [] varSelectorRecords = [] for uvs in uvsList: entryList = uvsDict[uvs] defList = [entry for entry in entryList if entry[1] is None] if defList: defList = [entry[0] for entry in defList] defOVSOffset = offset defList.sort() lastUV = defList[0] cnt = -1 defRecs = [] for defEntry in defList: cnt += 1 if (lastUV + cnt) != defEntry: rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt - 1) lastUV = defEntry defRecs.append(rec) cnt = 0 rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt) defRecs.append(rec) numDefRecs = len(defRecs) data.append(struct.pack(">L", numDefRecs)) data.extend(defRecs) offset += 4 + numDefRecs * 4 else: defOVSOffset = 0 ndefList = [entry for entry in entryList if entry[1] is not None] if ndefList: nonDefUVSOffset = offset ndefList.sort() numNonDefRecs = len(ndefList) data.append(struct.pack(">L", numNonDefRecs)) offset += 4 + numNonDefRecs * 5 for uv, gname in ndefList: gid = ttFont.getGlyphID(gname) ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid) data.append(ndrec) else: nonDefUVSOffset = 0 vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset) varSelectorRecords.append(vrec) data = bytesjoin(varSelectorRecords) + bytesjoin(data) self.length = 10 + len(data) headerdata = struct.pack( ">HLL", self.format, self.length, self.numVarSelectorRecords ) return headerdata + data class cmap_format_unknown(CmapSubtable): def toXML(self, writer, ttFont): cmapName = self.__class__.__name__[:12] + str(self.format) writer.begintag( cmapName, [ ("platformID", self.platformID), ("platEncID", self.platEncID), ], ) writer.newline() writer.dumphex(self.data) writer.endtag(cmapName) writer.newline() def fromXML(self, name, attrs, content, ttFont): self.data = readHex(content) self.cmap = {} def decompileHeader(self, data, ttFont): self.language = 0 # dummy value self.data = data def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. if data is not None and ttFont is not None: self.decompileHeader(data, ttFont) else: assert ( data is None and ttFont is None ), "Need both data and ttFont arguments" def compile(self, ttFont): if self.data: return self.data else: return None cmap_classes = { 0: cmap_format_0, 2: cmap_format_2, 4: cmap_format_4, 6: cmap_format_6, 12: cmap_format_12, 13: cmap_format_13, 14: cmap_format_14, } PKaZZZ�� � � "fontTools/ttLib/tables/_c_v_a_r.pyfrom . 
import DefaultTable from fontTools.misc import sstruct from fontTools.misc.textTools import bytesjoin from fontTools.ttLib.tables.TupleVariation import ( compileTupleVariationStore, decompileTupleVariationStore, TupleVariation, ) # https://www.microsoft.com/typography/otspec/cvar.htm # https://www.microsoft.com/typography/otspec/otvarcommonformats.htm # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html CVAR_HEADER_FORMAT = """ > # big endian majorVersion: H minorVersion: H tupleVariationCount: H offsetToData: H """ CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT) class table__c_v_a_r(DefaultTable.DefaultTable): dependencies = ["cvt ", "fvar"] def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.majorVersion, self.minorVersion = 1, 0 self.variations = [] def compile(self, ttFont, useSharedPoints=False): tupleVariationCount, tuples, data = compileTupleVariationStore( variations=[v for v in self.variations if v.hasImpact()], pointCount=len(ttFont["cvt "].values), axisTags=[axis.axisTag for axis in ttFont["fvar"].axes], sharedTupleIndices={}, useSharedPoints=useSharedPoints, ) header = { "majorVersion": self.majorVersion, "minorVersion": self.minorVersion, "tupleVariationCount": tupleVariationCount, "offsetToData": CVAR_HEADER_SIZE + len(tuples), } return b"".join([sstruct.pack(CVAR_HEADER_FORMAT, header), tuples, data]) def decompile(self, data, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] header = {} sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header) self.majorVersion = header["majorVersion"] self.minorVersion = header["minorVersion"] assert self.majorVersion == 1, self.majorVersion self.variations = decompileTupleVariationStore( tableTag=self.tableTag, axisTags=axisTags, tupleVariationCount=header["tupleVariationCount"], pointCount=len(ttFont["cvt "].values), sharedTuples=None, data=data, pos=CVAR_HEADER_SIZE, dataPos=header["offsetToData"], ) def fromXML(self, name, attrs, content, ttFont): if name == "version": self.majorVersion = int(attrs.get("major", "1")) self.minorVersion = int(attrs.get("minor", "0")) elif name == "tuple": valueCount = len(ttFont["cvt "].values) var = TupleVariation({}, [None] * valueCount) self.variations.append(var) for tupleElement in content: if isinstance(tupleElement, tuple): tupleName, tupleAttrs, tupleContent = tupleElement var.fromXML(tupleName, tupleAttrs, tupleContent) def toXML(self, writer, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion) writer.newline() for var in self.variations: var.toXML(writer, axisTags) fontTools/ttLib/tables/_c_v_t.py from fontTools.misc.textTools import safeEval from .
import DefaultTable import sys import array class table__c_v_t(DefaultTable.DefaultTable): def decompile(self, data, ttFont): values = array.array("h") values.frombytes(data) if sys.byteorder != "big": values.byteswap() self.values = values def compile(self, ttFont): values = self.values[:] if sys.byteorder != "big": values.byteswap() return values.tobytes() def toXML(self, writer, ttFont): for i in range(len(self.values)): value = self.values[i] writer.simpletag("cv", value=value, index=i) writer.newline() def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "values"): self.values = array.array("h") if name == "cv": index = safeEval(attrs["index"]) value = safeEval(attrs["value"]) for i in range(1 + index - len(self.values)): self.values.append(0) self.values[index] = value def __len__(self): return len(self.values) def __getitem__(self, index): return self.values[index] def __setitem__(self, index, value): self.values[index] = value def __delitem__(self, index): del self.values[index] fontTools/ttLib/tables/_f_e_a_t.py from .otBase import BaseTTXConverter class table__f_e_a_t(BaseTTXConverter): """The feature name table is an AAT (Apple Advanced Typography) table for storing font features, settings, and their human-readable names. It should not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS`` tables. See `Feature Name Table <https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html>`_ in the TrueType Reference Manual for more information on the structure and purpose of this table.""" pass fontTools/ttLib/tables/_f_p_g_m.py from . import DefaultTable from . import ttProgram class table__f_p_g_m(DefaultTable.DefaultTable): def decompile(self, data, ttFont): program = ttProgram.Program() program.fromBytecode(data) self.program = program def compile(self, ttFont): return self.program.getBytecode() def toXML(self, writer, ttFont): self.program.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): program = ttProgram.Program() program.fromXML(name, attrs, content, ttFont) self.program = program def __bool__(self): """ >>> fpgm = table__f_p_g_m() >>> bool(fpgm) False >>> p = ttProgram.Program() >>> fpgm.program = p >>> bool(fpgm) False >>> bc = bytearray([0]) >>> p.fromBytecode(bc) >>> bool(fpgm) True >>> p.bytecode.pop() 0 >>> bool(fpgm) False """ return hasattr(self, "program") and bool(self.program) __nonzero__ = __bool__ if __name__ == "__main__": import sys import doctest sys.exit(doctest.testmod().failed) fontTools/ttLib/tables/_f_v_a_r.py from fontTools.misc import sstruct from fontTools.misc.fixedTools import ( fixedToFloat as fi2fl, floatToFixed as fl2fi, floatToFixedToStr as fl2str, strToFixedToFloat as str2fl, ) from fontTools.misc.textTools import Tag, bytesjoin, safeEval from fontTools.ttLib import TTLibError from .
import DefaultTable import struct # Apple's documentation of 'fvar': # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html FVAR_HEADER_FORMAT = """ > # big endian version: L offsetToData: H countSizePairs: H axisCount: H axisSize: H instanceCount: H instanceSize: H """ FVAR_AXIS_FORMAT = """ > # big endian axisTag: 4s minValue: 16.16F defaultValue: 16.16F maxValue: 16.16F flags: H axisNameID: H """ FVAR_INSTANCE_FORMAT = """ > # big endian subfamilyNameID: H flags: H """ class table__f_v_a_r(DefaultTable.DefaultTable): dependencies = ["name"] def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.axes = [] self.instances = [] def compile(self, ttFont): instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4) includePostScriptNames = any( instance.postscriptNameID != 0xFFFF for instance in self.instances ) if includePostScriptNames: instanceSize += 2 header = { "version": 0x00010000, "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT), "countSizePairs": 2, "axisCount": len(self.axes), "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT), "instanceCount": len(self.instances), "instanceSize": instanceSize, } result = [sstruct.pack(FVAR_HEADER_FORMAT, header)] result.extend([axis.compile() for axis in self.axes]) axisTags = [axis.axisTag for axis in self.axes] for instance in self.instances: result.append(instance.compile(axisTags, includePostScriptNames)) return bytesjoin(result) def decompile(self, data, ttFont): header = {} headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT) header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize]) if header["version"] != 0x00010000: raise TTLibError("unsupported 'fvar' version %04x" % header["version"]) pos = header["offsetToData"] axisSize = header["axisSize"] for _ in range(header["axisCount"]): axis = Axis() axis.decompile(data[pos : pos + axisSize]) self.axes.append(axis) pos += axisSize instanceSize = header["instanceSize"] axisTags = [axis.axisTag for axis in self.axes] for _ in range(header["instanceCount"]): instance = NamedInstance() instance.decompile(data[pos : pos + instanceSize], axisTags) self.instances.append(instance) pos += instanceSize def toXML(self, writer, ttFont): for axis in self.axes: axis.toXML(writer, ttFont) for instance in self.instances: instance.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if name == "Axis": axis = Axis() axis.fromXML(name, attrs, content, ttFont) self.axes.append(axis) elif name == "NamedInstance": instance = NamedInstance() instance.fromXML(name, attrs, content, ttFont) self.instances.append(instance) class Axis(object): def __init__(self): self.axisTag = None self.axisNameID = 0 self.flags = 0 self.minValue = -1.0 self.defaultValue = 0.0 self.maxValue = 1.0 def compile(self): return sstruct.pack(FVAR_AXIS_FORMAT, self) def decompile(self, data): sstruct.unpack2(FVAR_AXIS_FORMAT, data, self) def toXML(self, writer, ttFont): name = ( ttFont["name"].getDebugName(self.axisNameID) if "name" in ttFont else None ) if name is not None: writer.newline() writer.comment(name) writer.newline() writer.begintag("Axis") writer.newline() for tag, value in [ ("AxisTag", self.axisTag), ("Flags", "0x%X" % self.flags), ("MinValue", fl2str(self.minValue, 16)), ("DefaultValue", fl2str(self.defaultValue, 16)), ("MaxValue", fl2str(self.maxValue, 16)), ("AxisNameID", str(self.axisNameID)), ]: writer.begintag(tag) writer.write(value) writer.endtag(tag) writer.newline() writer.endtag("Axis") writer.newline() def fromXML(self, name, 
_attrs, content, ttFont): assert name == "Axis" for tag, _, value in filter(lambda t: type(t) is tuple, content): value = "".join(value) if tag == "AxisTag": self.axisTag = Tag(value) elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", "AxisNameID"}: setattr( self, tag[0].lower() + tag[1:], str2fl(value, 16) if tag.endswith("Value") else safeEval(value), ) class NamedInstance(object): def __init__(self): self.subfamilyNameID = 0 self.postscriptNameID = 0xFFFF self.flags = 0 self.coordinates = {} def compile(self, axisTags, includePostScriptName): result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)] for axis in axisTags: fixedCoord = fl2fi(self.coordinates[axis], 16) result.append(struct.pack(">l", fixedCoord)) if includePostScriptName: result.append(struct.pack(">H", self.postscriptNameID)) return bytesjoin(result) def decompile(self, data, axisTags): sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self) pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT) for axis in axisTags: value = struct.unpack(">l", data[pos : pos + 4])[0] self.coordinates[axis] = fi2fl(value, 16) pos += 4 if pos + 2 <= len(data): self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0] else: self.postscriptNameID = 0xFFFF def toXML(self, writer, ttFont): name = ( ttFont["name"].getDebugName(self.subfamilyNameID) if "name" in ttFont else None ) if name is not None: writer.newline() writer.comment(name) writer.newline() psname = ( ttFont["name"].getDebugName(self.postscriptNameID) if "name" in ttFont else None ) if psname is not None: writer.comment("PostScript: " + psname) writer.newline() if self.postscriptNameID == 0xFFFF: writer.begintag( "NamedInstance", flags=("0x%X" % self.flags), subfamilyNameID=self.subfamilyNameID, ) else: writer.begintag( "NamedInstance", flags=("0x%X" % self.flags), subfamilyNameID=self.subfamilyNameID, postscriptNameID=self.postscriptNameID, ) writer.newline() for axis in ttFont["fvar"].axes: writer.simpletag( "coord", axis=axis.axisTag, value=fl2str(self.coordinates[axis.axisTag], 16), ) writer.newline() writer.endtag("NamedInstance") writer.newline() def fromXML(self, name, attrs, content, ttFont): assert name == "NamedInstance" self.subfamilyNameID = safeEval(attrs["subfamilyNameID"]) self.flags = safeEval(attrs.get("flags", "0")) if "postscriptNameID" in attrs: self.postscriptNameID = safeEval(attrs["postscriptNameID"]) else: self.postscriptNameID = 0xFFFF for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content): if tag == "coord": value = str2fl(elementAttrs["value"], 16) self.coordinates[elementAttrs["axis"]] = value fontTools/ttLib/tables/_g_a_s_p.py from fontTools.misc.textTools import safeEval from .
import DefaultTable import struct GASP_SYMMETRIC_GRIDFIT = 0x0004 GASP_SYMMETRIC_SMOOTHING = 0x0008 GASP_DOGRAY = 0x0002 GASP_GRIDFIT = 0x0001 class table__g_a_s_p(DefaultTable.DefaultTable): def decompile(self, data, ttFont): self.version, numRanges = struct.unpack(">HH", data[:4]) assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version data = data[4:] self.gaspRange = {} for i in range(numRanges): rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4]) self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior) data = data[4:] assert not data, "too much data" def compile(self, ttFont): version = 0 # ignore self.version numRanges = len(self.gaspRange) data = b"" items = sorted(self.gaspRange.items()) for rangeMaxPPEM, rangeGaspBehavior in items: data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior) if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY): version = 1 data = struct.pack(">HH", version, numRanges) + data return data def toXML(self, writer, ttFont): items = sorted(self.gaspRange.items()) for rangeMaxPPEM, rangeGaspBehavior in items: writer.simpletag( "gaspRange", [ ("rangeMaxPPEM", rangeMaxPPEM), ("rangeGaspBehavior", rangeGaspBehavior), ], ) writer.newline() def fromXML(self, name, attrs, content, ttFont): if name != "gaspRange": return if not hasattr(self, "gaspRange"): self.gaspRange = {} self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval( attrs["rangeGaspBehavior"] ) fontTools/ttLib/tables/_g_c_i_d.py from .otBase import BaseTTXConverter # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gcid.html class table__g_c_i_d(BaseTTXConverter): pass fontTools/ttLib/tables/_g_l_y_f.py """_g_l_y_f.py -- Converter classes for the 'glyf' table.""" from collections import namedtuple from fontTools.misc import sstruct from fontTools import ttLib from fontTools import version from fontTools.misc.transform import DecomposedTransform from fontTools.misc.textTools import tostr, safeEval, pad from fontTools.misc.arrayTools import updateBounds, pointInRect from fontTools.misc.bezierTools import calcQuadraticBounds from fontTools.misc.fixedTools import ( fixedToFloat as fi2fl, floatToFixed as fl2fi, floatToFixedToStr as fl2str, strToFixedToFloat as str2fl, ) from fontTools.misc.roundTools import noRound, otRound from fontTools.misc.vector import Vector from numbers import Number from . import DefaultTable from . import ttProgram import sys import struct import array import logging import math import os from fontTools.misc import xmlWriter from fontTools.misc.filenames import userNameToFileName from fontTools.misc.loggingTools import deprecateFunction from enum import IntFlag from functools import partial from types import SimpleNamespace from typing import Set log = logging.getLogger(__name__) # We compute the version the same as is computed in ttlib/__init__ # so that we can write 'ttLibVersion' attribute of the glyf TTX files # when glyf is written to separate files. version = ".".join(version.split(".")[:2]) # # The Apple and MS rasterizers behave differently for # scaled composite components: one does scale first and then translate # and the other does it vice versa. MS defined some flags to indicate # the difference, but it seems nobody actually _sets_ those flags. # # Funny thing: Apple seems to _only_ do their thing in the # WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE # (eg. Charcoal)...
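# A minimal sketch of the difference described above (helper names here are
# illustrative, not part of this module). For a component offset and a
# uniform scale, MS-style rendering scales first and then moves, so the
# offset is applied unscaled; Apple-style rendering moves first, so the
# offset gets scaled along with the outline:
#
#     def ms_way(point, scale, offset):
#         return (point[0] * scale + offset[0], point[1] * scale + offset[1])
#
#     def apple_way(point, scale, offset):
#         return ((point[0] + offset[0]) * scale, (point[1] + offset[1]) * scale)
#
#     # With scale=2 and offset=(100, 0), the same point lands 100 units apart:
#     assert ms_way((10, 0), 2, (100, 0)) == (120, 0)
#     assert apple_way((10, 0), 2, (100, 0)) == (220, 0)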
# SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple class table__g_l_y_f(DefaultTable.DefaultTable): """Glyph Data Table This class represents the `glyf <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf>`_ table, which contains outlines for glyphs in TrueType format. In many cases, it is easier to access and manipulate glyph outlines through the ``GlyphSet`` object returned from :py:meth:`fontTools.ttLib.ttFont.getGlyphSet`:: >> from fontTools.pens.boundsPen import BoundsPen >> glyphset = font.getGlyphSet() >> bp = BoundsPen(glyphset) >> glyphset["A"].draw(bp) >> bp.bounds (19, 0, 633, 716) However, this class can be used for low-level access to the ``glyf`` table data. Objects of this class support dictionary-like access, mapping glyph names to :py:class:`Glyph` objects:: >> glyf = font["glyf"] >> len(glyf["Aacute"].components) 2 Note that when adding glyphs to the font via low-level access to the ``glyf`` table, the new glyphs must also be added to the ``hmtx``/``vmtx`` table:: >> font["glyf"]["divisionslash"] = Glyph() >> font["hmtx"]["divisionslash"] = (640, 0) """ dependencies = ["fvar"] # this attribute controls the amount of padding applied to glyph data upon compile. # Glyph lengths are aligned to multiples of the specified value. # Allowed values are (0, 1, 2, 4). '0' means no padding; '1' (default) also means # no padding, except when padding would allow the use of short loca offsets. padding = 1 def decompile(self, data, ttFont): self.axisTags = ( [axis.axisTag for axis in ttFont["fvar"].axes] if "fvar" in ttFont else [] ) loca = ttFont["loca"] pos = int(loca[0]) nextPos = 0 noname = 0 self.glyphs = {} self.glyphOrder = glyphOrder = ttFont.getGlyphOrder() self._reverseGlyphOrder = {} for i in range(0, len(loca) - 1): try: glyphName = glyphOrder[i] except IndexError: noname = noname + 1 glyphName = "ttxautoglyph%s" % i nextPos = int(loca[i + 1]) glyphdata = data[pos:nextPos] if len(glyphdata) != (nextPos - pos): raise ttLib.TTLibError("not enough 'glyf' table data") glyph = Glyph(glyphdata) self.glyphs[glyphName] = glyph pos = nextPos if len(data) - nextPos >= 4: log.warning( "too much 'glyf' table data: expected %d, received %d bytes", nextPos, len(data), ) if noname: log.warning("%s glyphs have no name", noname) if ttFont.lazy is False: # Be lazy for None and True self.ensureDecompiled() def ensureDecompiled(self, recurse=False): # The recurse argument is unused, but part of the signature of # ensureDecompiled across the library. for glyph in self.glyphs.values(): glyph.expand(self) def compile(self, ttFont): self.axisTags = ( [axis.axisTag for axis in ttFont["fvar"].axes] if "fvar" in ttFont else [] ) if not hasattr(self, "glyphOrder"): self.glyphOrder = ttFont.getGlyphOrder() padding = self.padding assert padding in (0, 1, 2, 4) locations = [] currentLocation = 0 dataList = [] recalcBBoxes = ttFont.recalcBBoxes boundsDone = set() for glyphName in self.glyphOrder: glyph = self.glyphs[glyphName] glyphData = glyph.compile(self, recalcBBoxes, boundsDone=boundsDone) if padding > 1: glyphData = pad(glyphData, size=padding) locations.append(currentLocation) currentLocation = currentLocation + len(glyphData) dataList.append(glyphData) locations.append(currentLocation) if padding == 1 and currentLocation < 0x20000: # See if we can pad any odd-length glyphs to allow loca # table to use the short offsets. indices = [ i for i, glyphData in enumerate(dataList) if len(glyphData) % 2 == 1 ] if indices and currentLocation + len(indices) < 0x20000: # It fits. Do it.
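# Why the 0x20000 threshold above (sketch commentary, not original): the
# short ('loca' version 0) format stores each offset divided by two in a
# uint16, so offsets must be even and may not exceed 0xFFFF * 2 = 0x1FFFE.
# Padding each odd-length glyph with one byte (the loop that follows) makes
# every offset even, and the second check verifies the grown table still
# fits under the limit. A rough sketch of the saving (illustrative numbers):
#
#     n_glyphs = 1000
#     long_loca_size = (n_glyphs + 1) * 4   # uint32 per entry
#     short_loca_size = (n_glyphs + 1) * 2  # uint16 per entry
#     print(long_loca_size - short_loca_size)  # 2002 bytes saved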
for i in indices: dataList[i] += b"\0" currentLocation = 0 for i, glyphData in enumerate(dataList): locations[i] = currentLocation currentLocation += len(glyphData) locations[len(dataList)] = currentLocation data = b"".join(dataList) if "loca" in ttFont: ttFont["loca"].set(locations) if "maxp" in ttFont: ttFont["maxp"].numGlyphs = len(self.glyphs) if not data: # As a special case when all glyphs in the font are empty, add a zero byte # to the table, so that OTS doesn’t reject it, and to make the table work # on Windows as well. # See https://github.com/khaledhosny/ots/issues/52 data = b"\0" return data def toXML(self, writer, ttFont, splitGlyphs=False): notice = ( "The xMin, yMin, xMax and yMax values\n" "will be recalculated by the compiler." ) glyphNames = ttFont.getGlyphNames() if not splitGlyphs: writer.newline() writer.comment(notice) writer.newline() writer.newline() numGlyphs = len(glyphNames) if splitGlyphs: path, ext = os.path.splitext(writer.file.name) existingGlyphFiles = set() for glyphName in glyphNames: glyph = self.get(glyphName) if glyph is None: log.warning("glyph '%s' does not exist in glyf table", glyphName) continue if glyph.numberOfContours: if splitGlyphs: glyphPath = userNameToFileName( tostr(glyphName, "utf-8"), existingGlyphFiles, prefix=path + ".", suffix=ext, ) existingGlyphFiles.add(glyphPath.lower()) glyphWriter = xmlWriter.XMLWriter( glyphPath, idlefunc=writer.idlefunc, newlinestr=writer.newlinestr, ) glyphWriter.begintag("ttFont", ttLibVersion=version) glyphWriter.newline() glyphWriter.begintag("glyf") glyphWriter.newline() glyphWriter.comment(notice) glyphWriter.newline() writer.simpletag("TTGlyph", src=os.path.basename(glyphPath)) else: glyphWriter = writer glyphWriter.begintag( "TTGlyph", [ ("name", glyphName), ("xMin", glyph.xMin), ("yMin", glyph.yMin), ("xMax", glyph.xMax), ("yMax", glyph.yMax), ], ) glyphWriter.newline() glyph.toXML(glyphWriter, ttFont) glyphWriter.endtag("TTGlyph") glyphWriter.newline() if splitGlyphs: glyphWriter.endtag("glyf") glyphWriter.newline() glyphWriter.endtag("ttFont") glyphWriter.newline() glyphWriter.close() else: writer.simpletag("TTGlyph", name=glyphName) writer.comment("contains no outline data") if not splitGlyphs: writer.newline() writer.newline() def fromXML(self, name, attrs, content, ttFont): if name != "TTGlyph": return if not hasattr(self, "glyphs"): self.glyphs = {} if not hasattr(self, "glyphOrder"): self.glyphOrder = ttFont.getGlyphOrder() glyphName = attrs["name"] log.debug("unpacking glyph '%s'", glyphName) glyph = Glyph() for attr in ["xMin", "yMin", "xMax", "yMax"]: setattr(glyph, attr, safeEval(attrs.get(attr, "0"))) self.glyphs[glyphName] = glyph for element in content: if not isinstance(element, tuple): continue name, attrs, content = element glyph.fromXML(name, attrs, content, ttFont) if not ttFont.recalcBBoxes: glyph.compact(self, 0) def setGlyphOrder(self, glyphOrder): """Sets the glyph order Args: glyphOrder ([str]): List of glyph names in order. """ self.glyphOrder = glyphOrder self._reverseGlyphOrder = {} def getGlyphName(self, glyphID): """Returns the name for the glyph with the given ID. Raises an ``IndexError`` if the glyph ID is out of range. """ return self.glyphOrder[glyphID] def _buildReverseGlyphOrderDict(self): self._reverseGlyphOrder = d = {} for glyphID, glyphName in enumerate(self.glyphOrder): d[glyphName] = glyphID def getGlyphID(self, glyphName): """Returns the ID of the glyph with the given name. Raises a ``ValueError`` if the glyph is not found in the font.
""" glyphOrder = self.glyphOrder id = getattr(self, "_reverseGlyphOrder", {}).get(glyphName) if id is None or id >= len(glyphOrder) or glyphOrder[id] != glyphName: self._buildReverseGlyphOrderDict() id = self._reverseGlyphOrder.get(glyphName) if id is None: raise ValueError(glyphName) return id def removeHinting(self): """Removes TrueType hints from all glyphs in the glyphset. See :py:meth:`Glyph.removeHinting`. """ for glyph in self.glyphs.values(): glyph.removeHinting() def keys(self): return self.glyphs.keys() def has_key(self, glyphName): return glyphName in self.glyphs __contains__ = has_key def get(self, glyphName, default=None): glyph = self.glyphs.get(glyphName, default) if glyph is not None: glyph.expand(self) return glyph def __getitem__(self, glyphName): glyph = self.glyphs[glyphName] glyph.expand(self) return glyph def __setitem__(self, glyphName, glyph): self.glyphs[glyphName] = glyph if glyphName not in self.glyphOrder: self.glyphOrder.append(glyphName) def __delitem__(self, glyphName): del self.glyphs[glyphName] self.glyphOrder.remove(glyphName) def __len__(self): assert len(self.glyphOrder) == len(self.glyphs) return len(self.glyphs) def _getPhantomPoints(self, glyphName, hMetrics, vMetrics=None): """Compute the four "phantom points" for the given glyph from its bounding box and the horizontal and vertical advance widths and sidebearings stored in the ttFont's "hmtx" and "vmtx" tables. 'hMetrics' should be ttFont['hmtx'].metrics. 'vMetrics' should be ttFont['vmtx'].metrics if there is "vmtx" or None otherwise. If there is no vMetrics passed in, vertical phantom points are set to the zero coordinate. https://docs.microsoft.com/en-us/typography/opentype/spec/tt_instructing_glyphs#phantoms """ glyph = self[glyphName] if not hasattr(glyph, "xMin"): glyph.recalcBounds(self) horizontalAdvanceWidth, leftSideBearing = hMetrics[glyphName] leftSideX = glyph.xMin - leftSideBearing rightSideX = leftSideX + horizontalAdvanceWidth if vMetrics: verticalAdvanceWidth, topSideBearing = vMetrics[glyphName] topSideY = topSideBearing + glyph.yMax bottomSideY = topSideY - verticalAdvanceWidth else: bottomSideY = topSideY = 0 return [ (leftSideX, 0), (rightSideX, 0), (0, topSideY), (0, bottomSideY), ] def _getCoordinatesAndControls( self, glyphName, hMetrics, vMetrics=None, *, round=otRound ): """Return glyph coordinates and controls as expected by "gvar" table. The coordinates includes four "phantom points" for the glyph metrics, as mandated by the "gvar" spec. The glyph controls is a namedtuple with the following attributes: - numberOfContours: -1 for composite glyphs. - endPts: list of indices of end points for each contour in simple glyphs, or component indices in composite glyphs (used for IUP optimization). - flags: array of contour point flags for simple glyphs (None for composite glyphs). - components: list of base glyph names (str) for each component in composite glyphs (None for simple glyphs). The "hMetrics" and vMetrics are used to compute the "phantom points" (see the "_getPhantomPoints" method). Return None if the requested glyphName is not present. 
""" glyph = self.get(glyphName) if glyph is None: return None if glyph.isComposite(): coords = GlyphCoordinates( [(getattr(c, "x", 0), getattr(c, "y", 0)) for c in glyph.components] ) controls = _GlyphControls( numberOfContours=glyph.numberOfContours, endPts=list(range(len(glyph.components))), flags=None, components=[ (c.glyphName, getattr(c, "transform", None)) for c in glyph.components ], ) elif glyph.isVarComposite(): coords = [] controls = [] for component in glyph.components: ( componentCoords, componentControls, ) = component.getCoordinatesAndControls() coords.extend(componentCoords) controls.extend(componentControls) coords = GlyphCoordinates(coords) controls = _GlyphControls( numberOfContours=glyph.numberOfContours, endPts=list(range(len(coords))), flags=None, components=[ (c.glyphName, getattr(c, "flags", None)) for c in glyph.components ], ) else: coords, endPts, flags = glyph.getCoordinates(self) coords = coords.copy() controls = _GlyphControls( numberOfContours=glyph.numberOfContours, endPts=endPts, flags=flags, components=None, ) # Add phantom points for (left, right, top, bottom) positions. phantomPoints = self._getPhantomPoints(glyphName, hMetrics, vMetrics) coords.extend(phantomPoints) coords.toInt(round=round) return coords, controls def _setCoordinates(self, glyphName, coord, hMetrics, vMetrics=None): """Set coordinates and metrics for the given glyph. "coord" is an array of GlyphCoordinates which must include the "phantom points" as the last four coordinates. Both the horizontal/vertical advances and left/top sidebearings in "hmtx" and "vmtx" tables (if any) are updated from four phantom points and the glyph's bounding boxes. The "hMetrics" and vMetrics are used to propagate "phantom points" into "hmtx" and "vmtx" tables if desired. (see the "_getPhantomPoints" method). """ glyph = self[glyphName] # Handle phantom points for (left, right, top, bottom) positions. assert len(coord) >= 4 leftSideX = coord[-4][0] rightSideX = coord[-3][0] topSideY = coord[-2][1] bottomSideY = coord[-1][1] coord = coord[:-4] if glyph.isComposite(): assert len(coord) == len(glyph.components) for p, comp in zip(coord, glyph.components): if hasattr(comp, "x"): comp.x, comp.y = p elif glyph.isVarComposite(): for comp in glyph.components: coord = comp.setCoordinates(coord) assert not coord elif glyph.numberOfContours == 0: assert len(coord) == 0 else: assert len(coord) == len(glyph.coordinates) glyph.coordinates = GlyphCoordinates(coord) glyph.recalcBounds(self, boundsDone=set()) horizontalAdvanceWidth = otRound(rightSideX - leftSideX) if horizontalAdvanceWidth < 0: # unlikely, but it can happen, see: # https://github.com/fonttools/fonttools/pull/1198 horizontalAdvanceWidth = 0 leftSideBearing = otRound(glyph.xMin - leftSideX) hMetrics[glyphName] = horizontalAdvanceWidth, leftSideBearing if vMetrics is not None: verticalAdvanceWidth = otRound(topSideY - bottomSideY) if verticalAdvanceWidth < 0: # unlikely but do the same as horizontal verticalAdvanceWidth = 0 topSideBearing = otRound(topSideY - glyph.yMax) vMetrics[glyphName] = verticalAdvanceWidth, topSideBearing # Deprecated def _synthesizeVMetrics(self, glyphName, ttFont, defaultVerticalOrigin): """This method is wrong and deprecated. 
For rationale see: https://github.com/fonttools/fonttools/pull/2266/files#r613569473 """ vMetrics = getattr(ttFont.get("vmtx"), "metrics", None) if vMetrics is None: verticalAdvanceWidth = ttFont["head"].unitsPerEm topSideY = getattr(ttFont.get("hhea"), "ascent", None) if topSideY is None: if defaultVerticalOrigin is not None: topSideY = defaultVerticalOrigin else: topSideY = verticalAdvanceWidth glyph = self[glyphName] glyph.recalcBounds(self) topSideBearing = otRound(topSideY - glyph.yMax) vMetrics = {glyphName: (verticalAdvanceWidth, topSideBearing)} return vMetrics @deprecateFunction("use '_getPhantomPoints' instead", category=DeprecationWarning) def getPhantomPoints(self, glyphName, ttFont, defaultVerticalOrigin=None): """Old public name for self._getPhantomPoints(). See: https://github.com/fonttools/fonttools/pull/2266""" hMetrics = ttFont["hmtx"].metrics vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin) return self._getPhantomPoints(glyphName, hMetrics, vMetrics) @deprecateFunction( "use '_getCoordinatesAndControls' instead", category=DeprecationWarning ) def getCoordinatesAndControls(self, glyphName, ttFont, defaultVerticalOrigin=None): """Old public name for self._getCoordinatesAndControls(). See: https://github.com/fonttools/fonttools/pull/2266""" hMetrics = ttFont["hmtx"].metrics vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin) return self._getCoordinatesAndControls(glyphName, hMetrics, vMetrics) @deprecateFunction("use '_setCoordinates' instead", category=DeprecationWarning) def setCoordinates(self, glyphName, ttFont): """Old public name for self._setCoordinates(). See: https://github.com/fonttools/fonttools/pull/2266""" hMetrics = ttFont["hmtx"].metrics vMetrics = getattr(ttFont.get("vmtx"), "metrics", None) self._setCoordinates(glyphName, hMetrics, vMetrics) _GlyphControls = namedtuple( "_GlyphControls", "numberOfContours endPts flags components" ) glyphHeaderFormat = """ > # big endian numberOfContours: h xMin: h yMin: h xMax: h yMax: h """ # flags flagOnCurve = 0x01 flagXShort = 0x02 flagYShort = 0x04 flagRepeat = 0x08 flagXsame = 0x10 flagYsame = 0x20 flagOverlapSimple = 0x40 flagCubic = 0x80 # These flags are kept for XML output after decompiling the coordinates keepFlags = flagOnCurve + flagOverlapSimple + flagCubic _flagSignBytes = { 0: 2, flagXsame: 0, flagXShort | flagXsame: +1, flagXShort: -1, flagYsame: 0, flagYShort | flagYsame: +1, flagYShort: -1, } def flagBest(x, y, onCurve): """For a given x,y delta pair, returns the flag that packs this pair most efficiently, as well as the number of byte cost of such flag.""" flag = flagOnCurve if onCurve else 0 cost = 0 # do x if x == 0: flag = flag | flagXsame elif -255 <= x <= 255: flag = flag | flagXShort if x > 0: flag = flag | flagXsame cost += 1 else: cost += 2 # do y if y == 0: flag = flag | flagYsame elif -255 <= y <= 255: flag = flag | flagYShort if y > 0: flag = flag | flagYsame cost += 1 else: cost += 2 return flag, cost def flagFits(newFlag, oldFlag, mask): newBytes = _flagSignBytes[newFlag & mask] oldBytes = _flagSignBytes[oldFlag & mask] return newBytes == oldBytes or abs(newBytes) > abs(oldBytes) def flagSupports(newFlag, oldFlag): return ( (oldFlag & flagOnCurve) == (newFlag & flagOnCurve) and flagFits(newFlag, oldFlag, flagXsame | flagXShort) and flagFits(newFlag, oldFlag, flagYsame | flagYShort) ) def flagEncodeCoord(flag, mask, coord, coordBytes): byteCount = _flagSignBytes[flag & mask] if byteCount == 1: coordBytes.append(coord) elif byteCount == 
-1: coordBytes.append(-coord) elif byteCount == 2: coordBytes.extend(struct.pack(">h", coord)) def flagEncodeCoords(flag, x, y, xBytes, yBytes): flagEncodeCoord(flag, flagXsame | flagXShort, x, xBytes) flagEncodeCoord(flag, flagYsame | flagYShort, y, yBytes) ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0 NON_OVERLAPPING = 0x0010 # set to same value for all components (obsolete!) MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11 WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple) UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) CompositeMaxpValues = namedtuple( "CompositeMaxpValues", ["nPoints", "nContours", "maxComponentDepth"] ) class Glyph(object): """This class represents an individual TrueType glyph. TrueType glyph objects come in two flavours: simple and composite. Simple glyph objects contain contours, represented via the ``.coordinates``, ``.flags``, ``.numberOfContours``, and ``.endPtsOfContours`` attributes; composite glyphs contain components, available through the ``.components`` attributes. Because the ``.coordinates`` attribute (and other simple glyph attributes mentioned above) is only set on simple glyphs and the ``.components`` attribute is only set on composite glyphs, it is necessary to use the :py:meth:`isComposite` method to test whether a glyph is simple or composite before attempting to access its data. For a composite glyph, the components can also be accessed via array-like access:: >> assert(font["glyf"]["Aacute"].isComposite()) >> font["glyf"]["Aacute"][0] <fontTools.ttLib.tables._g_l_y_f.GlyphComponent at 0x1027b2ee0> """ def __init__(self, data=b""): if not data: # empty char self.numberOfContours = 0 return self.data = data def compact(self, glyfTable, recalcBBoxes=True): data = self.compile(glyfTable, recalcBBoxes) self.__dict__.clear() self.data = data def expand(self, glyfTable): if not hasattr(self, "data"): # already unpacked return if not self.data: # empty char del self.data self.numberOfContours = 0 return dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self) del self.data # Some fonts (eg. Neirizi.ttf) have a 0 for numberOfContours in # some glyphs; decompileCoordinates assumes that there's at least # one, so short-circuit here. 
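# Lifecycle sketch (not part of the original module): a Glyph keeps its raw
# record in self.data until something needs the outline; expand() parses the
# header plus contours/components and deletes self.data, and compact() is
# the inverse. Assuming a lazily loaded font with a non-empty glyph "A"
# (the path is hypothetical):
#
#     from fontTools.ttLib import TTFont
#     font = TTFont("MyFont.ttf", lazy=True)
#     glyf = font["glyf"]
#     g = glyf.glyphs["A"]
#     print(hasattr(g, "data"))            # True while still packed
#     g.expand(glyf)                       # parse header + outline
#     print(g.numberOfContours)
#     g.compact(glyf, recalcBBoxes=False)  # repack into raw bytes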
if self.numberOfContours == 0: return if self.isComposite(): self.decompileComponents(data, glyfTable) elif self.isVarComposite(): self.decompileVarComponents(data, glyfTable) else: self.decompileCoordinates(data) def compile(self, glyfTable, recalcBBoxes=True, *, boundsDone=None): if hasattr(self, "data"): if recalcBBoxes: # must unpack glyph in order to recalculate bounding box self.expand(glyfTable) else: return self.data if self.numberOfContours == 0: return b"" if recalcBBoxes: self.recalcBounds(glyfTable, boundsDone=boundsDone) data = sstruct.pack(glyphHeaderFormat, self) if self.isComposite(): data = data + self.compileComponents(glyfTable) elif self.isVarComposite(): data = data + self.compileVarComponents(glyfTable) else: data = data + self.compileCoordinates() return data def toXML(self, writer, ttFont): if self.isComposite(): for compo in self.components: compo.toXML(writer, ttFont) haveInstructions = hasattr(self, "program") elif self.isVarComposite(): for compo in self.components: compo.toXML(writer, ttFont) haveInstructions = False else: last = 0 for i in range(self.numberOfContours): writer.begintag("contour") writer.newline() for j in range(last, self.endPtsOfContours[i] + 1): attrs = [ ("x", self.coordinates[j][0]), ("y", self.coordinates[j][1]), ("on", self.flags[j] & flagOnCurve), ] if self.flags[j] & flagOverlapSimple: # Apple's rasterizer uses flagOverlapSimple in the first contour/first pt to flag glyphs that contain overlapping contours attrs.append(("overlap", 1)) if self.flags[j] & flagCubic: attrs.append(("cubic", 1)) writer.simpletag("pt", attrs) writer.newline() last = self.endPtsOfContours[i] + 1 writer.endtag("contour") writer.newline() haveInstructions = self.numberOfContours > 0 if haveInstructions: if self.program: writer.begintag("instructions") writer.newline() self.program.toXML(writer, ttFont) writer.endtag("instructions") else: writer.simpletag("instructions") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "contour": if self.numberOfContours < 0: raise ttLib.TTLibError("can't mix composites and contours in glyph") self.numberOfContours = self.numberOfContours + 1 coordinates = GlyphCoordinates() flags = bytearray() for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name != "pt": continue # ignore anything but "pt" coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"]))) flag = bool(safeEval(attrs["on"])) if "overlap" in attrs and bool(safeEval(attrs["overlap"])): flag |= flagOverlapSimple if "cubic" in attrs and bool(safeEval(attrs["cubic"])): flag |= flagCubic flags.append(flag) if not hasattr(self, "coordinates"): self.coordinates = coordinates self.flags = flags self.endPtsOfContours = [len(coordinates) - 1] else: self.coordinates.extend(coordinates) self.flags.extend(flags) self.endPtsOfContours.append(len(self.coordinates) - 1) elif name == "component": if self.numberOfContours > 0: raise ttLib.TTLibError("can't mix composites and contours in glyph") self.numberOfContours = -1 if not hasattr(self, "components"): self.components = [] component = GlyphComponent() self.components.append(component) component.fromXML(name, attrs, content, ttFont) elif name == "varComponent": if self.numberOfContours > 0: raise ttLib.TTLibError("can't mix composites and contours in glyph") self.numberOfContours = -2 if not hasattr(self, "components"): self.components = [] component = GlyphVarComponent() self.components.append(component) component.fromXML(name, attrs, content, 
ttFont) elif name == "instructions": self.program = ttProgram.Program() for element in content: if not isinstance(element, tuple): continue name, attrs, content = element self.program.fromXML(name, attrs, content, ttFont) def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1): assert self.isComposite() or self.isVarComposite() nContours = 0 nPoints = 0 initialMaxComponentDepth = maxComponentDepth for compo in self.components: baseGlyph = glyfTable[compo.glyphName] if baseGlyph.numberOfContours == 0: continue elif baseGlyph.numberOfContours > 0: nP, nC = baseGlyph.getMaxpValues() else: nP, nC, componentDepth = baseGlyph.getCompositeMaxpValues( glyfTable, initialMaxComponentDepth + 1 ) maxComponentDepth = max(maxComponentDepth, componentDepth) nPoints = nPoints + nP nContours = nContours + nC return CompositeMaxpValues(nPoints, nContours, maxComponentDepth) def getMaxpValues(self): assert self.numberOfContours > 0 return len(self.coordinates), len(self.endPtsOfContours) def decompileComponents(self, data, glyfTable): self.components = [] more = 1 haveInstructions = 0 while more: component = GlyphComponent() more, haveInstr, data = component.decompile(data, glyfTable) haveInstructions = haveInstructions | haveInstr self.components.append(component) if haveInstructions: (numInstructions,) = struct.unpack(">h", data[:2]) data = data[2:] self.program = ttProgram.Program() self.program.fromBytecode(data[:numInstructions]) data = data[numInstructions:] if len(data) >= 4: log.warning( "too much glyph data at the end of composite glyph: %d excess bytes", len(data), ) def decompileVarComponents(self, data, glyfTable): self.components = [] while len(data) >= GlyphVarComponent.MIN_SIZE: component = GlyphVarComponent() data = component.decompile(data, glyfTable) self.components.append(component) def decompileCoordinates(self, data): endPtsOfContours = array.array("H") endPtsOfContours.frombytes(data[: 2 * self.numberOfContours]) if sys.byteorder != "big": endPtsOfContours.byteswap() self.endPtsOfContours = endPtsOfContours.tolist() pos = 2 * self.numberOfContours (instructionLength,) = struct.unpack(">h", data[pos : pos + 2]) self.program = ttProgram.Program() self.program.fromBytecode(data[pos + 2 : pos + 2 + instructionLength]) pos += 2 + instructionLength nCoordinates = self.endPtsOfContours[-1] + 1 flags, xCoordinates, yCoordinates = self.decompileCoordinatesRaw( nCoordinates, data, pos ) # fill in repetitions and apply signs self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates) xIndex = 0 yIndex = 0 for i in range(nCoordinates): flag = flags[i] # x coordinate if flag & flagXShort: if flag & flagXsame: x = xCoordinates[xIndex] else: x = -xCoordinates[xIndex] xIndex = xIndex + 1 elif flag & flagXsame: x = 0 else: x = xCoordinates[xIndex] xIndex = xIndex + 1 # y coordinate if flag & flagYShort: if flag & flagYsame: y = yCoordinates[yIndex] else: y = -yCoordinates[yIndex] yIndex = yIndex + 1 elif flag & flagYsame: y = 0 else: y = yCoordinates[yIndex] yIndex = yIndex + 1 coordinates[i] = (x, y) assert xIndex == len(xCoordinates) assert yIndex == len(yCoordinates) coordinates.relativeToAbsolute() # discard all flags except "keepFlags" for i in range(len(flags)): flags[i] &= keepFlags self.flags = flags def decompileCoordinatesRaw(self, nCoordinates, data, pos=0): # unpack flags and prepare unpacking of coordinates flags = bytearray(nCoordinates) # Warning: deep Python trickery going on. We use the struct module to unpack # the coordinates. 
We build a format string based on the flags, so we can # unpack the coordinates in one struct.unpack() call. xFormat = ">" # big endian yFormat = ">" # big endian j = 0 while True: flag = data[pos] pos += 1 repeat = 1 if flag & flagRepeat: repeat = data[pos] + 1 pos += 1 for k in range(repeat): if flag & flagXShort: xFormat = xFormat + "B" elif not (flag & flagXsame): xFormat = xFormat + "h" if flag & flagYShort: yFormat = yFormat + "B" elif not (flag & flagYsame): yFormat = yFormat + "h" flags[j] = flag j = j + 1 if j >= nCoordinates: break assert j == nCoordinates, "bad glyph flags" # unpack raw coordinates, krrrrrr-tching! xDataLen = struct.calcsize(xFormat) yDataLen = struct.calcsize(yFormat) if len(data) - pos - (xDataLen + yDataLen) >= 4: log.warning( "too much glyph data: %d excess bytes", len(data) - pos - (xDataLen + yDataLen), ) xCoordinates = struct.unpack(xFormat, data[pos : pos + xDataLen]) yCoordinates = struct.unpack( yFormat, data[pos + xDataLen : pos + xDataLen + yDataLen] ) return flags, xCoordinates, yCoordinates def compileComponents(self, glyfTable): data = b"" lastcomponent = len(self.components) - 1 more = 1 haveInstructions = 0 for i in range(len(self.components)): if i == lastcomponent: haveInstructions = hasattr(self, "program") more = 0 compo = self.components[i] data = data + compo.compile(more, haveInstructions, glyfTable) if haveInstructions: instructions = self.program.getBytecode() data = data + struct.pack(">h", len(instructions)) + instructions return data def compileVarComponents(self, glyfTable): return b"".join(c.compile(glyfTable) for c in self.components) def compileCoordinates(self): assert len(self.coordinates) == len(self.flags) data = [] endPtsOfContours = array.array("H", self.endPtsOfContours) if sys.byteorder != "big": endPtsOfContours.byteswap() data.append(endPtsOfContours.tobytes()) instructions = self.program.getBytecode() data.append(struct.pack(">h", len(instructions))) data.append(instructions) deltas = self.coordinates.copy() deltas.toInt() deltas.absoluteToRelative() # TODO(behdad): Add a configuration option for this? deltas = self.compileDeltasGreedy(self.flags, deltas) # deltas = self.compileDeltasOptimal(self.flags, deltas) data.extend(deltas) return b"".join(data) def compileDeltasGreedy(self, flags, deltas): # Implements a greedy algorithm for packing coordinate deltas: # uses shortest representation one coordinate at a time. compressedFlags = bytearray() compressedXs = bytearray() compressedYs = bytearray() lastflag = None repeat = 0 for flag, (x, y) in zip(flags, deltas): # Oh, the horrors of TrueType # do x if x == 0: flag = flag | flagXsame elif -255 <= x <= 255: flag = flag | flagXShort if x > 0: flag = flag | flagXsame else: x = -x compressedXs.append(x) else: compressedXs.extend(struct.pack(">h", x)) # do y if y == 0: flag = flag | flagYsame elif -255 <= y <= 255: flag = flag | flagYShort if y > 0: flag = flag | flagYsame else: y = -y compressedYs.append(y) else: compressedYs.extend(struct.pack(">h", y)) # handle repeating flags if flag == lastflag and repeat != 255: repeat = repeat + 1 if repeat == 1: compressedFlags.append(flag) else: compressedFlags[-2] = flag | flagRepeat compressedFlags[-1] = repeat else: repeat = 0 compressedFlags.append(flag) lastflag = flag return (compressedFlags, compressedXs, compressedYs) def compileDeltasOptimal(self, flags, deltas): # Implements an optimal, dynamic-programming algorithm for packing coordinate # deltas. The savings are negligible :(.
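# For reference (sketch, not part of the original module): the per-coordinate
# byte cost that both packers minimise, greedy above and dynamic programming
# below, follows directly from the flag semantics. A delta of 0 costs nothing
# (flagXsame/flagYsame), a delta with magnitude <= 255 costs one byte
# (flagXShort, with the sign folded into the same-flag bit), and anything
# else costs a signed short:
#
#     def delta_cost(d):
#         if d == 0:
#             return 0
#         return 1 if -255 <= d <= 255 else 2
#
#     assert [delta_cost(d) for d in (0, 7, -255, 256)] == [0, 1, 1, 2]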
candidates = [] bestTuple = None bestCost = 0 repeat = 0 for flag, (x, y) in zip(flags, deltas): # Oh, the horrors of TrueType flag, coordBytes = flagBest(x, y, flag) bestCost += 1 + coordBytes newCandidates = [ (bestCost, bestTuple, flag, coordBytes), (bestCost + 1, bestTuple, (flag | flagRepeat), coordBytes), ] for lastCost, lastTuple, lastFlag, coordBytes in candidates: if ( lastCost + coordBytes <= bestCost + 1 and (lastFlag & flagRepeat) and (lastFlag < 0xFF00) and flagSupports(lastFlag, flag) ): if (lastFlag & 0xFF) == ( flag | flagRepeat ) and lastCost == bestCost + 1: continue newCandidates.append( (lastCost + coordBytes, lastTuple, lastFlag + 256, coordBytes) ) candidates = newCandidates bestTuple = min(candidates, key=lambda t: t[0]) bestCost = bestTuple[0] flags = [] while bestTuple: cost, bestTuple, flag, coordBytes = bestTuple flags.append(flag) flags.reverse() compressedFlags = bytearray() compressedXs = bytearray() compressedYs = bytearray() coords = iter(deltas) ff = [] for flag in flags: repeatCount, flag = flag >> 8, flag & 0xFF compressedFlags.append(flag) if flag & flagRepeat: assert repeatCount > 0 compressedFlags.append(repeatCount) else: assert repeatCount == 0 for i in range(1 + repeatCount): x, y = next(coords) flagEncodeCoords(flag, x, y, compressedXs, compressedYs) ff.append(flag) try: next(coords) raise Exception("internal error") except StopIteration: pass return (compressedFlags, compressedXs, compressedYs) def recalcBounds(self, glyfTable, *, boundsDone=None): """Recalculates the bounds of the glyph. Each glyph object stores its bounding box in the ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds must be provided to resolve component bounds. """ if self.isComposite() and self.tryRecalcBoundsComposite( glyfTable, boundsDone=boundsDone ): return try: coords, endPts, flags = self.getCoordinates(glyfTable) self.xMin, self.yMin, self.xMax, self.yMax = coords.calcIntBounds() except NotImplementedError: pass def tryRecalcBoundsComposite(self, glyfTable, *, boundsDone=None): """Try recalculating the bounds of a composite glyph that has certain constrained properties. Namely, none of the components have a transform other than an integer translate, and none uses the anchor points. Each glyph object stores its bounding box in the ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds must be provided to resolve component bounds. Return True if bounds were calculated, False otherwise. 
""" for compo in self.components: if hasattr(compo, "firstPt") or hasattr(compo, "transform"): return False if not float(compo.x).is_integer() or not float(compo.y).is_integer(): return False # All components are untransformed and have an integer x/y translate bounds = None for compo in self.components: glyphName = compo.glyphName g = glyfTable[glyphName] if boundsDone is None or glyphName not in boundsDone: g.recalcBounds(glyfTable, boundsDone=boundsDone) if boundsDone is not None: boundsDone.add(glyphName) # empty components shouldn't update the bounds of the parent glyph if g.numberOfContours == 0: continue x, y = compo.x, compo.y bounds = updateBounds(bounds, (g.xMin + x, g.yMin + y)) bounds = updateBounds(bounds, (g.xMax + x, g.yMax + y)) if bounds is None: bounds = (0, 0, 0, 0) self.xMin, self.yMin, self.xMax, self.yMax = bounds return True def isComposite(self): """Test whether a glyph has components""" if hasattr(self, "data"): return struct.unpack(">h", self.data[:2])[0] == -1 if self.data else False else: return self.numberOfContours == -1 def isVarComposite(self): """Test whether a glyph has variable components""" if hasattr(self, "data"): return struct.unpack(">h", self.data[:2])[0] == -2 if self.data else False else: return self.numberOfContours == -2 def getCoordinates(self, glyfTable): """Return the coordinates, end points and flags This method returns three values: A :py:class:`GlyphCoordinates` object, a list of the indexes of the final points of each contour (allowing you to split up the coordinates list into contours) and a list of flags. On simple glyphs, this method returns information from the glyph's own contours; on composite glyphs, it "flattens" all components recursively to return a list of coordinates representing all the components involved in the glyph. To interpret the flags for each point, see the "Simple Glyph Flags" section of the `glyf table specification <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#simple-glyph-description>`. """ if self.numberOfContours > 0: return self.coordinates, self.endPtsOfContours, self.flags elif self.isComposite(): # it's a composite allCoords = GlyphCoordinates() allFlags = bytearray() allEndPts = [] for compo in self.components: g = glyfTable[compo.glyphName] try: coordinates, endPts, flags = g.getCoordinates(glyfTable) except RecursionError: raise ttLib.TTLibError( "glyph '%s' contains a recursive component reference" % compo.glyphName ) coordinates = GlyphCoordinates(coordinates) if hasattr(compo, "firstPt"): # component uses two reference points: we apply the transform _before_ # computing the offset between the points if hasattr(compo, "transform"): coordinates.transform(compo.transform) x1, y1 = allCoords[compo.firstPt] x2, y2 = coordinates[compo.secondPt] move = x1 - x2, y1 - y2 coordinates.translate(move) else: # component uses XY offsets move = compo.x, compo.y if not hasattr(compo, "transform"): coordinates.translate(move) else: apple_way = compo.flags & SCALED_COMPONENT_OFFSET ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET assert not (apple_way and ms_way) if not (apple_way or ms_way): scale_component_offset = ( SCALE_COMPONENT_OFFSET_DEFAULT # see top of this file ) else: scale_component_offset = apple_way if scale_component_offset: # the Apple way: first move, then scale (ie. 
scale the component offset) coordinates.translate(move) coordinates.transform(compo.transform) else: # the MS way: first scale, then move coordinates.transform(compo.transform) coordinates.translate(move) offset = len(allCoords) allEndPts.extend(e + offset for e in endPts) allCoords.extend(coordinates) allFlags.extend(flags) return allCoords, allEndPts, allFlags elif self.isVarComposite(): raise NotImplementedError("use TTGlyphSet to draw VarComposite glyphs") else: return GlyphCoordinates(), [], bytearray() def getComponentNames(self, glyfTable): """Returns a list of names of component glyphs used in this glyph This method can be used on simple glyphs (in which case it returns an empty list) or composite glyphs. """ if hasattr(self, "data") and self.isVarComposite(): # TODO(VarComposite) Add implementation without expanding glyph self.expand(glyfTable) if not hasattr(self, "data"): if self.isComposite() or self.isVarComposite(): return [c.glyphName for c in self.components] else: return [] # Extract components without expanding glyph if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: return [] # Not composite data = self.data i = 10 components = [] more = 1 while more: flags, glyphID = struct.unpack(">HH", data[i : i + 4]) i += 4 flags = int(flags) components.append(glyfTable.getGlyphName(int(glyphID))) if flags & ARG_1_AND_2_ARE_WORDS: i += 4 else: i += 2 if flags & WE_HAVE_A_SCALE: i += 2 elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 more = flags & MORE_COMPONENTS return components def trim(self, remove_hinting=False): """Remove padding and, if requested, hinting, from a glyph. This works on both expanded and compacted glyphs, without expanding it.""" if not hasattr(self, "data"): if remove_hinting: if self.isComposite(): if hasattr(self, "program"): del self.program elif self.isVarComposite(): pass # Doesn't have hinting else: self.program = ttProgram.Program() self.program.fromBytecode([]) # No padding to trim. 
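# The byte-level walk below (sketch commentary, not original): for a simple
# glyph it measures endPtsOfContours, the instruction block and the packed
# flag/coordinate streams; for a composite it follows MORE_COMPONENTS, so
# the record can be sliced to its true length without parsing the outline.
# Typical usage, assuming an already loaded font:
#
#     glyf = font["glyf"]
#     for name in glyf.keys():
#         glyf.glyphs[name].trim(remove_hinting=True)  # drop pads and hints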
return if not self.data: return numContours = struct.unpack(">h", self.data[:2])[0] data = bytearray(self.data) i = 10 if numContours >= 0: i += 2 * numContours # endPtsOfContours nCoordinates = ((data[i - 2] << 8) | data[i - 1]) + 1 instructionLen = (data[i] << 8) | data[i + 1] if remove_hinting: # Zero instruction length data[i] = data[i + 1] = 0 i += 2 if instructionLen: # Splice it out data = data[:i] + data[i + instructionLen :] instructionLen = 0 else: i += 2 + instructionLen coordBytes = 0 j = 0 while True: flag = data[i] i = i + 1 repeat = 1 if flag & flagRepeat: repeat = data[i] + 1 i = i + 1 xBytes = yBytes = 0 if flag & flagXShort: xBytes = 1 elif not (flag & flagXsame): xBytes = 2 if flag & flagYShort: yBytes = 1 elif not (flag & flagYsame): yBytes = 2 coordBytes += (xBytes + yBytes) * repeat j += repeat if j >= nCoordinates: break assert j == nCoordinates, "bad glyph flags" i += coordBytes # Remove padding data = data[:i] elif self.isComposite(): more = 1 we_have_instructions = False while more: flags = (data[i] << 8) | data[i + 1] if remove_hinting: flags &= ~WE_HAVE_INSTRUCTIONS if flags & WE_HAVE_INSTRUCTIONS: we_have_instructions = True data[i + 0] = flags >> 8 data[i + 1] = flags & 0xFF i += 4 flags = int(flags) if flags & ARG_1_AND_2_ARE_WORDS: i += 4 else: i += 2 if flags & WE_HAVE_A_SCALE: i += 2 elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 more = flags & MORE_COMPONENTS if we_have_instructions: instructionLen = (data[i] << 8) | data[i + 1] i += 2 + instructionLen # Remove padding data = data[:i] elif self.isVarComposite(): i = 0 MIN_SIZE = GlyphVarComponent.MIN_SIZE while len(data[i : i + MIN_SIZE]) >= MIN_SIZE: size = GlyphVarComponent.getSize(data[i : i + MIN_SIZE]) i += size data = data[:i] self.data = data def removeHinting(self): """Removes TrueType hinting instructions from the glyph.""" self.trim(remove_hinting=True) def draw(self, pen, glyfTable, offset=0): """Draws the glyph using the supplied pen object. Arguments: pen: An object conforming to the pen protocol. glyfTable: A :py:class:`table__g_l_y_f` object, to resolve components. offset (int): A horizontal offset. If provided, all coordinates are translated by this offset. """ if self.isComposite(): for component in self.components: glyphName, transform = component.getComponentInfo() pen.addComponent(glyphName, transform) return coordinates, endPts, flags = self.getCoordinates(glyfTable) if offset: coordinates = coordinates.copy() coordinates.translate((offset, 0)) start = 0 maybeInt = lambda v: int(v) if v == int(v) else v for end in endPts: end = end + 1 contour = coordinates[start:end] cFlags = [flagOnCurve & f for f in flags[start:end]] cuFlags = [flagCubic & f for f in flags[start:end]] start = end if 1 not in cFlags: assert all(cuFlags) or not any(cuFlags) cubic = all(cuFlags) if cubic: count = len(contour) assert count % 2 == 0, "Odd number of cubic off-curves undefined" l = contour[-1] f = contour[0] p0 = (maybeInt((l[0] + f[0]) * 0.5), maybeInt((l[1] + f[1]) * 0.5)) pen.moveTo(p0) for i in range(0, count, 2): p1 = contour[i] p2 = contour[i + 1] p4 = contour[i + 2 if i + 2 < count else 0] p3 = ( maybeInt((p2[0] + p4[0]) * 0.5), maybeInt((p2[1] + p4[1]) * 0.5), ) pen.curveTo(p1, p2, p3) else: # There is not a single on-curve point on the curve, # use pen.qCurveTo's special case by specifying None # as the on-curve point. 
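# Background for the None trick just below (sketch, not original): a TrueType
# contour may consist entirely of off-curve quadratic points, with the
# on-curve points implied at the midpoints between neighbours. The pen
# protocol models this by ending qCurveTo() with None instead of an on-curve
# point, e.g. with a recording pen:
#
#     from fontTools.pens.recordingPen import RecordingPen
#     pen = RecordingPen()
#     pen.qCurveTo((0, 10), (10, 10), (10, 0), None)  # closed, no on-curve pts
#     pen.closePath()
#     print(pen.value)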
contour.append(None) pen.qCurveTo(*contour) else: # Shuffle the points so that the contour is guaranteed # to *end* in an on-curve point, which we'll use for # the moveTo. firstOnCurve = cFlags.index(1) + 1 contour = contour[firstOnCurve:] + contour[:firstOnCurve] cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve] cuFlags = cuFlags[firstOnCurve:] + cuFlags[:firstOnCurve] pen.moveTo(contour[-1]) while contour: nextOnCurve = cFlags.index(1) + 1 if nextOnCurve == 1: # Skip a final lineTo(), as it is implied by # pen.closePath() if len(contour) > 1: pen.lineTo(contour[0]) else: cubicFlags = [f for f in cuFlags[: nextOnCurve - 1]] assert all(cubicFlags) or not any(cubicFlags) cubic = any(cubicFlags) if cubic: assert all( cubicFlags ), "Mixed cubic and quadratic segment undefined" count = nextOnCurve assert ( count >= 3 ), "At least two cubic off-curve points required" assert ( count - 1 ) % 2 == 0, "Odd number of cubic off-curves undefined" for i in range(0, count - 3, 2): p1 = contour[i] p2 = contour[i + 1] p4 = contour[i + 2] p3 = ( maybeInt((p2[0] + p4[0]) * 0.5), maybeInt((p2[1] + p4[1]) * 0.5), ) lastOnCurve = p3 pen.curveTo(p1, p2, p3) pen.curveTo(*contour[count - 3 : count]) else: pen.qCurveTo(*contour[:nextOnCurve]) contour = contour[nextOnCurve:] cFlags = cFlags[nextOnCurve:] cuFlags = cuFlags[nextOnCurve:] pen.closePath() def drawPoints(self, pen, glyfTable, offset=0): """Draw the glyph using the supplied pointPen. As opposed to Glyph.draw(), this will not change the point indices. """ if self.isComposite(): for component in self.components: glyphName, transform = component.getComponentInfo() pen.addComponent(glyphName, transform) return coordinates, endPts, flags = self.getCoordinates(glyfTable) if offset: coordinates = coordinates.copy() coordinates.translate((offset, 0)) start = 0 for end in endPts: end = end + 1 contour = coordinates[start:end] cFlags = flags[start:end] start = end pen.beginPath() # Start with the appropriate segment type based on the final segment if cFlags[-1] & flagOnCurve: segmentType = "line" elif cFlags[-1] & flagCubic: segmentType = "curve" else: segmentType = "qcurve" for i, pt in enumerate(contour): if cFlags[i] & flagOnCurve: pen.addPoint(pt, segmentType=segmentType) segmentType = "line" else: pen.addPoint(pt) segmentType = "curve" if cFlags[i] & flagCubic else "qcurve" pen.endPath() def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ def __ne__(self, other): result = self.__eq__(other) return result if result is NotImplemented else not result # Vector.__round__ uses the built-in (Banker's) `round` but we want # to use otRound below _roundv = partial(Vector.__round__, round=otRound) def _is_mid_point(p0: tuple, p1: tuple, p2: tuple) -> bool: # True if p1 is in the middle of p0 and p2, either before or after rounding p0 = Vector(p0) p1 = Vector(p1) p2 = Vector(p2) return ((p0 + p2) * 0.5).isclose(p1) or _roundv(p0) + _roundv(p2) == _roundv(p1) * 2 def dropImpliedOnCurvePoints(*interpolatable_glyphs: Glyph) -> Set[int]: """Drop impliable on-curve points from the (simple) glyph or glyphs. In TrueType glyf outlines, on-curve points can be implied when they are located at the midpoint of the line connecting two consecutive off-curve points. If more than one glyphs are passed, these are assumed to be interpolatable masters of the same glyph impliable, and thus only the on-curve points that are impliable for all of them will actually be implied. 
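    For example, with consecutive off-curves at (0, 0) and (100, 0), an
    on-curve point at (50, 0) sits at their midpoint and is impliable
    (illustrative values, checked via the module's ``_is_mid_point`` helper):

    >>> _is_mid_point((0, 0), (50, 0), (100, 0))
    True
    >>> _is_mid_point((0, 0), (40, 0), (100, 0))
    False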
Composite glyphs or empty glyphs are skipped, only simple glyphs with 1 or more contours are considered. The input glyph(s) is/are modified in-place. Args: interpolatable_glyphs: The glyph or glyphs to modify in-place. Returns: The set of point indices that were dropped if any. Raises: ValueError if simple glyphs are not in fact interpolatable because they have different point flags or number of contours. Reference: https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html """ staticAttributes = SimpleNamespace( numberOfContours=None, flags=None, endPtsOfContours=None ) drop = None simple_glyphs = [] for i, glyph in enumerate(interpolatable_glyphs): if glyph.numberOfContours < 1: # ignore composite or empty glyphs continue for attr in staticAttributes.__dict__: expected = getattr(staticAttributes, attr) found = getattr(glyph, attr) if expected is None: setattr(staticAttributes, attr, found) elif expected != found: raise ValueError( f"Incompatible {attr} for glyph at master index {i}: " f"expected {expected}, found {found}" ) may_drop = set() start = 0 coords = glyph.coordinates flags = staticAttributes.flags endPtsOfContours = staticAttributes.endPtsOfContours for last in endPtsOfContours: for i in range(start, last + 1): if not (flags[i] & flagOnCurve): continue prv = i - 1 if i > start else last nxt = i + 1 if i < last else start if (flags[prv] & flagOnCurve) or flags[prv] != flags[nxt]: continue # we may drop the ith on-curve if halfway between previous/next off-curves if not _is_mid_point(coords[prv], coords[i], coords[nxt]): continue may_drop.add(i) start = last + 1 # we only want to drop if ALL interpolatable glyphs have the same implied oncurves if drop is None: drop = may_drop else: drop.intersection_update(may_drop) simple_glyphs.append(glyph) if drop: # Do the actual dropping flags = staticAttributes.flags assert flags is not None newFlags = array.array( "B", (flags[i] for i in range(len(flags)) if i not in drop) ) endPts = staticAttributes.endPtsOfContours assert endPts is not None newEndPts = [] i = 0 delta = 0 for d in sorted(drop): while d > endPts[i]: newEndPts.append(endPts[i] - delta) i += 1 delta += 1 while i < len(endPts): newEndPts.append(endPts[i] - delta) i += 1 for glyph in simple_glyphs: coords = glyph.coordinates glyph.coordinates = GlyphCoordinates( coords[i] for i in range(len(coords)) if i not in drop ) glyph.flags = newFlags glyph.endPtsOfContours = newEndPts return drop if drop is not None else set() class GlyphComponent(object): """Represents a component within a composite glyph. The component is represented internally with four attributes: ``glyphName``, ``x``, ``y`` and ``transform``. If there is no "two-by-two" matrix (i.e no scaling, reflection, or rotation; only translation), the ``transform`` attribute is not present. """ # The above documentation is not *completely* true, but is *true enough* because # the rare firstPt/lastPt attributes are not totally supported and nobody seems to # mind - see below. def __init__(self): pass def getComponentInfo(self): """Return information about the component This method returns a tuple of two values: the glyph name of the component's base glyph, and a transformation matrix. As opposed to accessing the attributes directly, ``getComponentInfo`` always returns a six-element tuple of the component's transformation matrix, even when the two-by-two ``.transform`` matrix is not present. 
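        Example (illustrative component data):

        >>> comp = GlyphComponent()
        >>> comp.glyphName, comp.x, comp.y = "A", 10, 20
        >>> comp.getComponentInfo()
        ('A', (1, 0, 0, 1, 10, 20))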
""" # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement # something equivalent in fontTools.objects.glyph (I'd rather not # convert it to an absolute offset, since it is valuable information). # This method will now raise "AttributeError: x" on glyphs that use # this TT feature. if hasattr(self, "transform"): [[xx, xy], [yx, yy]] = self.transform trans = (xx, xy, yx, yy, self.x, self.y) else: trans = (1, 0, 0, 1, self.x, self.y) return self.glyphName, trans def decompile(self, data, glyfTable): flags, glyphID = struct.unpack(">HH", data[:4]) self.flags = int(flags) glyphID = int(glyphID) self.glyphName = glyfTable.getGlyphName(int(glyphID)) data = data[4:] if self.flags & ARG_1_AND_2_ARE_WORDS: if self.flags & ARGS_ARE_XY_VALUES: self.x, self.y = struct.unpack(">hh", data[:4]) else: x, y = struct.unpack(">HH", data[:4]) self.firstPt, self.secondPt = int(x), int(y) data = data[4:] else: if self.flags & ARGS_ARE_XY_VALUES: self.x, self.y = struct.unpack(">bb", data[:2]) else: x, y = struct.unpack(">BB", data[:2]) self.firstPt, self.secondPt = int(x), int(y) data = data[2:] if self.flags & WE_HAVE_A_SCALE: (scale,) = struct.unpack(">h", data[:2]) self.transform = [ [fi2fl(scale, 14), 0], [0, fi2fl(scale, 14)], ] # fixed 2.14 data = data[2:] elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE: xscale, yscale = struct.unpack(">hh", data[:4]) self.transform = [ [fi2fl(xscale, 14), 0], [0, fi2fl(yscale, 14)], ] # fixed 2.14 data = data[4:] elif self.flags & WE_HAVE_A_TWO_BY_TWO: (xscale, scale01, scale10, yscale) = struct.unpack(">hhhh", data[:8]) self.transform = [ [fi2fl(xscale, 14), fi2fl(scale01, 14)], [fi2fl(scale10, 14), fi2fl(yscale, 14)], ] # fixed 2.14 data = data[8:] more = self.flags & MORE_COMPONENTS haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS self.flags = self.flags & ( ROUND_XY_TO_GRID | USE_MY_METRICS | SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | NON_OVERLAPPING | OVERLAP_COMPOUND ) return more, haveInstructions, data def compile(self, more, haveInstructions, glyfTable): data = b"" # reset all flags we will calculate ourselves flags = self.flags & ( ROUND_XY_TO_GRID | USE_MY_METRICS | SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | NON_OVERLAPPING | OVERLAP_COMPOUND ) if more: flags = flags | MORE_COMPONENTS if haveInstructions: flags = flags | WE_HAVE_INSTRUCTIONS if hasattr(self, "firstPt"): if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255): data = data + struct.pack(">BB", self.firstPt, self.secondPt) else: data = data + struct.pack(">HH", self.firstPt, self.secondPt) flags = flags | ARG_1_AND_2_ARE_WORDS else: x = otRound(self.x) y = otRound(self.y) flags = flags | ARGS_ARE_XY_VALUES if (-128 <= x <= 127) and (-128 <= y <= 127): data = data + struct.pack(">bb", x, y) else: data = data + struct.pack(">hh", x, y) flags = flags | ARG_1_AND_2_ARE_WORDS if hasattr(self, "transform"): transform = [[fl2fi(x, 14) for x in row] for row in self.transform] if transform[0][1] or transform[1][0]: flags = flags | WE_HAVE_A_TWO_BY_TWO data = data + struct.pack( ">hhhh", transform[0][0], transform[0][1], transform[1][0], transform[1][1], ) elif transform[0][0] != transform[1][1]: flags = flags | WE_HAVE_AN_X_AND_Y_SCALE data = data + struct.pack(">hh", transform[0][0], transform[1][1]) else: flags = flags | WE_HAVE_A_SCALE data = data + struct.pack(">h", transform[0][0]) glyphID = glyfTable.getGlyphID(self.glyphName) return struct.pack(">HH", flags, glyphID) + data def toXML(self, writer, ttFont): attrs = [("glyphName", self.glyphName)] if not 
hasattr(self, "firstPt"): attrs = attrs + [("x", self.x), ("y", self.y)] else: attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)] if hasattr(self, "transform"): transform = self.transform if transform[0][1] or transform[1][0]: attrs = attrs + [ ("scalex", fl2str(transform[0][0], 14)), ("scale01", fl2str(transform[0][1], 14)), ("scale10", fl2str(transform[1][0], 14)), ("scaley", fl2str(transform[1][1], 14)), ] elif transform[0][0] != transform[1][1]: attrs = attrs + [ ("scalex", fl2str(transform[0][0], 14)), ("scaley", fl2str(transform[1][1], 14)), ] else: attrs = attrs + [("scale", fl2str(transform[0][0], 14))] attrs = attrs + [("flags", hex(self.flags))] writer.simpletag("component", attrs) writer.newline() def fromXML(self, name, attrs, content, ttFont): self.glyphName = attrs["glyphName"] if "firstPt" in attrs: self.firstPt = safeEval(attrs["firstPt"]) self.secondPt = safeEval(attrs["secondPt"]) else: self.x = safeEval(attrs["x"]) self.y = safeEval(attrs["y"]) if "scale01" in attrs: scalex = str2fl(attrs["scalex"], 14) scale01 = str2fl(attrs["scale01"], 14) scale10 = str2fl(attrs["scale10"], 14) scaley = str2fl(attrs["scaley"], 14) self.transform = [[scalex, scale01], [scale10, scaley]] elif "scalex" in attrs: scalex = str2fl(attrs["scalex"], 14) scaley = str2fl(attrs["scaley"], 14) self.transform = [[scalex, 0], [0, scaley]] elif "scale" in attrs: scale = str2fl(attrs["scale"], 14) self.transform = [[scale, 0], [0, scale]] self.flags = safeEval(attrs["flags"]) def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ def __ne__(self, other): result = self.__eq__(other) return result if result is NotImplemented else not result # # Variable Composite glyphs # https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1.md # class VarComponentFlags(IntFlag): USE_MY_METRICS = 0x0001 AXIS_INDICES_ARE_SHORT = 0x0002 UNIFORM_SCALE = 0x0004 HAVE_TRANSLATE_X = 0x0008 HAVE_TRANSLATE_Y = 0x0010 HAVE_ROTATION = 0x0020 HAVE_SCALE_X = 0x0040 HAVE_SCALE_Y = 0x0080 HAVE_SKEW_X = 0x0100 HAVE_SKEW_Y = 0x0200 HAVE_TCENTER_X = 0x0400 HAVE_TCENTER_Y = 0x0800 GID_IS_24BIT = 0x1000 AXES_HAVE_VARIATION = 0x2000 RESET_UNSPECIFIED_AXES = 0x4000 VarComponentTransformMappingValues = namedtuple( "VarComponentTransformMappingValues", ["flag", "fractionalBits", "scale", "defaultValue"], ) VAR_COMPONENT_TRANSFORM_MAPPING = { "translateX": VarComponentTransformMappingValues( VarComponentFlags.HAVE_TRANSLATE_X, 0, 1, 0 ), "translateY": VarComponentTransformMappingValues( VarComponentFlags.HAVE_TRANSLATE_Y, 0, 1, 0 ), "rotation": VarComponentTransformMappingValues( VarComponentFlags.HAVE_ROTATION, 12, 180, 0 ), "scaleX": VarComponentTransformMappingValues( VarComponentFlags.HAVE_SCALE_X, 10, 1, 1 ), "scaleY": VarComponentTransformMappingValues( VarComponentFlags.HAVE_SCALE_Y, 10, 1, 1 ), "skewX": VarComponentTransformMappingValues( VarComponentFlags.HAVE_SKEW_X, 12, -180, 0 ), "skewY": VarComponentTransformMappingValues( VarComponentFlags.HAVE_SKEW_Y, 12, 180, 0 ), "tCenterX": VarComponentTransformMappingValues( VarComponentFlags.HAVE_TCENTER_X, 0, 1, 0 ), "tCenterY": VarComponentTransformMappingValues( VarComponentFlags.HAVE_TCENTER_Y, 0, 1, 0 ), } class GlyphVarComponent(object): MIN_SIZE = 5 def __init__(self): self.location = {} self.transform = DecomposedTransform() @staticmethod def getSize(data): size = 5 flags = struct.unpack(">H", data[:2])[0] numAxes = int(data[2]) if flags & VarComponentFlags.GID_IS_24BIT: size += 1 size += 
numAxes if flags & VarComponentFlags.AXIS_INDICES_ARE_SHORT: size += 2 * numAxes else: axisIndices = array.array("B", data[:numAxes]) size += numAxes for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items(): if flags & mapping_values.flag: size += 2 return size def decompile(self, data, glyfTable): flags = struct.unpack(">H", data[:2])[0] self.flags = int(flags) data = data[2:] numAxes = int(data[0]) data = data[1:] if flags & VarComponentFlags.GID_IS_24BIT: glyphID = int(struct.unpack(">L", b"\0" + data[:3])[0]) data = data[3:] flags ^= VarComponentFlags.GID_IS_24BIT else: glyphID = int(struct.unpack(">H", data[:2])[0]) data = data[2:] self.glyphName = glyfTable.getGlyphName(int(glyphID)) if flags & VarComponentFlags.AXIS_INDICES_ARE_SHORT: axisIndices = array.array("H", data[: 2 * numAxes]) if sys.byteorder != "big": axisIndices.byteswap() data = data[2 * numAxes :] flags ^= VarComponentFlags.AXIS_INDICES_ARE_SHORT else: axisIndices = array.array("B", data[:numAxes]) data = data[numAxes:] assert len(axisIndices) == numAxes axisIndices = list(axisIndices) axisValues = array.array("h", data[: 2 * numAxes]) if sys.byteorder != "big": axisValues.byteswap() data = data[2 * numAxes :] assert len(axisValues) == numAxes axisValues = [fi2fl(v, 14) for v in axisValues] self.location = { glyfTable.axisTags[i]: v for i, v in zip(axisIndices, axisValues) } def read_transform_component(data, values): if flags & values.flag: return ( data[2:], fi2fl(struct.unpack(">h", data[:2])[0], values.fractionalBits) * values.scale, ) else: return data, values.defaultValue for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items(): data, value = read_transform_component(data, mapping_values) setattr(self.transform, attr_name, value) if flags & VarComponentFlags.UNIFORM_SCALE: if flags & VarComponentFlags.HAVE_SCALE_X and not ( flags & VarComponentFlags.HAVE_SCALE_Y ): self.transform.scaleY = self.transform.scaleX flags |= VarComponentFlags.HAVE_SCALE_Y flags ^= VarComponentFlags.UNIFORM_SCALE return data def compile(self, glyfTable): data = b"" if not hasattr(self, "flags"): flags = 0 # Calculate optimal transform component flags for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items(): value = getattr(self.transform, attr_name) if fl2fi(value / mapping.scale, mapping.fractionalBits) != fl2fi( mapping.defaultValue / mapping.scale, mapping.fractionalBits ): flags |= mapping.flag else: flags = self.flags if ( flags & VarComponentFlags.HAVE_SCALE_X and flags & VarComponentFlags.HAVE_SCALE_Y and fl2fi(self.transform.scaleX, 10) == fl2fi(self.transform.scaleY, 10) ): flags |= VarComponentFlags.UNIFORM_SCALE flags ^= VarComponentFlags.HAVE_SCALE_Y numAxes = len(self.location) data = data + struct.pack(">B", numAxes) glyphID = glyfTable.getGlyphID(self.glyphName) if glyphID > 65535: flags |= VarComponentFlags.GID_IS_24BIT data = data + struct.pack(">L", glyphID)[1:] else: data = data + struct.pack(">H", glyphID) axisIndices = [glyfTable.axisTags.index(tag) for tag in self.location.keys()] if all(a <= 255 for a in axisIndices): axisIndices = array.array("B", axisIndices) else: axisIndices = array.array("H", axisIndices) if sys.byteorder != "big": axisIndices.byteswap() flags |= VarComponentFlags.AXIS_INDICES_ARE_SHORT data = data + bytes(axisIndices) axisValues = self.location.values() axisValues = array.array("h", (fl2fi(v, 14) for v in axisValues)) if sys.byteorder != "big": axisValues.byteswap() data = data + bytes(axisValues) def write_transform_component(data, value, values): if 
flags & values.flag: return data + struct.pack( ">h", fl2fi(value / values.scale, values.fractionalBits) ) else: return data for attr_name, mapping_values in VAR_COMPONENT_TRANSFORM_MAPPING.items(): value = getattr(self.transform, attr_name) data = write_transform_component(data, value, mapping_values) return struct.pack(">H", flags) + data def toXML(self, writer, ttFont): attrs = [("glyphName", self.glyphName)] if hasattr(self, "flags"): attrs = attrs + [("flags", hex(self.flags))] for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items(): v = getattr(self.transform, attr_name) if v != mapping.defaultValue: attrs.append((attr_name, fl2str(v, mapping.fractionalBits))) writer.begintag("varComponent", attrs) writer.newline() writer.begintag("location") writer.newline() for tag, v in self.location.items(): writer.simpletag("axis", [("tag", tag), ("value", fl2str(v, 14))]) writer.newline() writer.endtag("location") writer.newline() writer.endtag("varComponent") writer.newline() def fromXML(self, name, attrs, content, ttFont): self.glyphName = attrs["glyphName"] if "flags" in attrs: self.flags = safeEval(attrs["flags"]) for attr_name, mapping in VAR_COMPONENT_TRANSFORM_MAPPING.items(): if attr_name not in attrs: continue v = str2fl(safeEval(attrs[attr_name]), mapping.fractionalBits) setattr(self.transform, attr_name, v) for c in content: if not isinstance(c, tuple): continue name, attrs, content = c if name != "location": continue for c in content: if not isinstance(c, tuple): continue name, attrs, content = c assert name == "axis" assert not content self.location[attrs["tag"]] = str2fl(safeEval(attrs["value"]), 14) def getPointCount(self): assert hasattr(self, "flags"), "VarComponent with variations must have flags" count = 0 if self.flags & VarComponentFlags.AXES_HAVE_VARIATION: count += len(self.location) if self.flags & ( VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y ): count += 1 if self.flags & VarComponentFlags.HAVE_ROTATION: count += 1 if self.flags & ( VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y ): count += 1 if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y): count += 1 if self.flags & ( VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y ): count += 1 return count def getCoordinatesAndControls(self): coords = [] controls = [] if self.flags & VarComponentFlags.AXES_HAVE_VARIATION: for tag, v in self.location.items(): controls.append(tag) coords.append((fl2fi(v, 14), 0)) if self.flags & ( VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y ): controls.append("translate") coords.append((self.transform.translateX, self.transform.translateY)) if self.flags & VarComponentFlags.HAVE_ROTATION: controls.append("rotation") coords.append((fl2fi(self.transform.rotation / 180, 12), 0)) if self.flags & ( VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y ): controls.append("scale") coords.append( (fl2fi(self.transform.scaleX, 10), fl2fi(self.transform.scaleY, 10)) ) if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y): controls.append("skew") coords.append( ( fl2fi(self.transform.skewX / -180, 12), fl2fi(self.transform.skewY / 180, 12), ) ) if self.flags & ( VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y ): controls.append("tCenter") coords.append((self.transform.tCenterX, self.transform.tCenterY)) return coords, controls def setCoordinates(self, coords): i = 0 if self.flags & VarComponentFlags.AXES_HAVE_VARIATION: 
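            # Axis values are F2Dot14 fixed-point numbers: fl2fi(v, 14) is
            # otRound(v * (1 << 14)), so the representable range is [-2.0, 2.0).
            # For example fl2fi(0.5, 14) == 8192 and fi2fl(8192, 14) == 0.5.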
newLocation = {} for tag in self.location: newLocation[tag] = fi2fl(coords[i][0], 14) i += 1 self.location = newLocation self.transform = DecomposedTransform() if self.flags & ( VarComponentFlags.HAVE_TRANSLATE_X | VarComponentFlags.HAVE_TRANSLATE_Y ): self.transform.translateX, self.transform.translateY = coords[i] i += 1 if self.flags & VarComponentFlags.HAVE_ROTATION: self.transform.rotation = fi2fl(coords[i][0], 12) * 180 i += 1 if self.flags & ( VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y ): self.transform.scaleX, self.transform.scaleY = fi2fl( coords[i][0], 10 ), fi2fl(coords[i][1], 10) i += 1 if self.flags & (VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y): self.transform.skewX, self.transform.skewY = ( fi2fl(coords[i][0], 12) * -180, fi2fl(coords[i][1], 12) * 180, ) i += 1 if self.flags & ( VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y ): self.transform.tCenterX, self.transform.tCenterY = coords[i] i += 1 return coords[i:] def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ def __ne__(self, other): result = self.__eq__(other) return result if result is NotImplemented else not result class GlyphCoordinates(object): """A list of glyph coordinates. Unlike an ordinary list, this is a numpy-like matrix object which supports matrix addition, scalar multiplication and other operations described below. """ def __init__(self, iterable=[]): self._a = array.array("d") self.extend(iterable) @property def array(self): """Returns the underlying array of coordinates""" return self._a @staticmethod def zeros(count): """Creates a new ``GlyphCoordinates`` object with all coordinates set to (0,0)""" g = GlyphCoordinates() g._a.frombytes(bytes(count * 2 * g._a.itemsize)) return g def copy(self): """Creates a new ``GlyphCoordinates`` object which is a copy of the current one.""" c = GlyphCoordinates() c._a.extend(self._a) return c def __len__(self): """Returns the number of coordinates in the array.""" return len(self._a) // 2 def __getitem__(self, k): """Returns a two element tuple (x,y)""" a = self._a if isinstance(k, slice): indices = range(*k.indices(len(self))) # Instead of calling ourselves recursively, duplicate code; faster ret = [] for k in indices: x = a[2 * k] y = a[2 * k + 1] ret.append( (int(x) if x.is_integer() else x, int(y) if y.is_integer() else y) ) return ret x = a[2 * k] y = a[2 * k + 1] return (int(x) if x.is_integer() else x, int(y) if y.is_integer() else y) def __setitem__(self, k, v): """Sets a point's coordinates to a two element tuple (x,y)""" if isinstance(k, slice): indices = range(*k.indices(len(self))) # XXX This only works if len(v) == len(indices) for j, i in enumerate(indices): self[i] = v[j] return self._a[2 * k], self._a[2 * k + 1] = v def __delitem__(self, i): """Removes a point from the list""" i = (2 * i) % len(self._a) del self._a[i] del self._a[i] def __repr__(self): return "GlyphCoordinates([" + ",".join(str(c) for c in self) + "])" def append(self, p): self._a.extend(tuple(p)) def extend(self, iterable): for p in iterable: self._a.extend(p) def toInt(self, *, round=otRound): if round is noRound: return a = self._a for i in range(len(a)): a[i] = round(a[i]) def calcBounds(self): a = self._a if not a: return 0, 0, 0, 0 xs = a[0::2] ys = a[1::2] return min(xs), min(ys), max(xs), max(ys) def calcIntBounds(self, round=otRound): return tuple(round(v) for v in self.calcBounds()) def relativeToAbsolute(self): a = self._a x, y = 0, 0 for i in range(0, 
len(a), 2): a[i] = x = a[i] + x a[i + 1] = y = a[i + 1] + y def absoluteToRelative(self): a = self._a x, y = 0, 0 for i in range(0, len(a), 2): nx = a[i] ny = a[i + 1] a[i] = nx - x a[i + 1] = ny - y x = nx y = ny def translate(self, p): """ >>> GlyphCoordinates([(1,2)]).translate((.5,0)) """ x, y = p if x == 0 and y == 0: return a = self._a for i in range(0, len(a), 2): a[i] += x a[i + 1] += y def scale(self, p): """ >>> GlyphCoordinates([(1,2)]).scale((.5,0)) """ x, y = p if x == 1 and y == 1: return a = self._a for i in range(0, len(a), 2): a[i] *= x a[i + 1] *= y def transform(self, t): """ >>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5))) """ a = self._a for i in range(0, len(a), 2): x = a[i] y = a[i + 1] px = x * t[0][0] + y * t[1][0] py = x * t[0][1] + y * t[1][1] a[i] = px a[i + 1] = py def __eq__(self, other): """ >>> g = GlyphCoordinates([(1,2)]) >>> g2 = GlyphCoordinates([(1.0,2)]) >>> g3 = GlyphCoordinates([(1.5,2)]) >>> g == g2 True >>> g == g3 False >>> g2 == g3 False """ if type(self) != type(other): return NotImplemented return self._a == other._a def __ne__(self, other): """ >>> g = GlyphCoordinates([(1,2)]) >>> g2 = GlyphCoordinates([(1.0,2)]) >>> g3 = GlyphCoordinates([(1.5,2)]) >>> g != g2 False >>> g != g3 True >>> g2 != g3 True """ result = self.__eq__(other) return result if result is NotImplemented else not result # Math operations def __pos__(self): """ >>> g = GlyphCoordinates([(1,2)]) >>> g GlyphCoordinates([(1, 2)]) >>> g2 = +g >>> g2 GlyphCoordinates([(1, 2)]) >>> g2.translate((1,0)) >>> g2 GlyphCoordinates([(2, 2)]) >>> g GlyphCoordinates([(1, 2)]) """ return self.copy() def __neg__(self): """ >>> g = GlyphCoordinates([(1,2)]) >>> g GlyphCoordinates([(1, 2)]) >>> g2 = -g >>> g2 GlyphCoordinates([(-1, -2)]) >>> g GlyphCoordinates([(1, 2)]) """ r = self.copy() a = r._a for i in range(len(a)): a[i] = -a[i] return r def __round__(self, *, round=otRound): r = self.copy() r.toInt(round=round) return r def __add__(self, other): return self.copy().__iadd__(other) def __sub__(self, other): return self.copy().__isub__(other) def __mul__(self, other): return self.copy().__imul__(other) def __truediv__(self, other): return self.copy().__itruediv__(other) __radd__ = __add__ __rmul__ = __mul__ def __rsub__(self, other): return other + (-self) def __iadd__(self, other): """ >>> g = GlyphCoordinates([(1,2)]) >>> g += (.5,0) >>> g GlyphCoordinates([(1.5, 2)]) >>> g2 = GlyphCoordinates([(3,4)]) >>> g += g2 >>> g GlyphCoordinates([(4.5, 6)]) """ if isinstance(other, tuple): assert len(other) == 2 self.translate(other) return self if isinstance(other, GlyphCoordinates): other = other._a a = self._a assert len(a) == len(other) for i in range(len(a)): a[i] += other[i] return self return NotImplemented def __isub__(self, other): """ >>> g = GlyphCoordinates([(1,2)]) >>> g -= (.5,0) >>> g GlyphCoordinates([(0.5, 2)]) >>> g2 = GlyphCoordinates([(3,4)]) >>> g -= g2 >>> g GlyphCoordinates([(-2.5, -2)]) """ if isinstance(other, tuple): assert len(other) == 2 self.translate((-other[0], -other[1])) return self if isinstance(other, GlyphCoordinates): other = other._a a = self._a assert len(a) == len(other) for i in range(len(a)): a[i] -= other[i] return self return NotImplemented def __imul__(self, other): """ >>> g = GlyphCoordinates([(1,2)]) >>> g *= (2,.5) >>> g *= 2 >>> g GlyphCoordinates([(4, 2)]) >>> g = GlyphCoordinates([(1,2)]) >>> g *= 2 >>> g GlyphCoordinates([(2, 4)]) """ if isinstance(other, tuple): assert len(other) == 2 self.scale(other) return self if 
isinstance(other, Number): if other == 1: return self a = self._a for i in range(len(a)): a[i] *= other return self return NotImplemented def __itruediv__(self, other): """ >>> g = GlyphCoordinates([(1,3)]) >>> g /= (.5,1.5) >>> g /= 2 >>> g GlyphCoordinates([(1, 1)]) """ if isinstance(other, Number): other = (other, other) if isinstance(other, tuple): if other == (1, 1): return self assert len(other) == 2 self.scale((1.0 / other[0], 1.0 / other[1])) return self return NotImplemented def __bool__(self): """ >>> g = GlyphCoordinates([]) >>> bool(g) False >>> g = GlyphCoordinates([(0,0), (0.,0)]) >>> bool(g) True >>> g = GlyphCoordinates([(0,0), (1,0)]) >>> bool(g) True >>> g = GlyphCoordinates([(0,.5), (0,0)]) >>> bool(g) True """ return bool(self._a) __nonzero__ = __bool__ if __name__ == "__main__": import doctest, sys sys.exit(doctest.testmod().failed) PKaZZZ?J��'�'"fontTools/ttLib/tables/_g_v_a_r.pyfrom collections import UserDict, deque from functools import partial from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from . import DefaultTable import array import itertools import logging import struct import sys import fontTools.ttLib.tables.TupleVariation as tv log = logging.getLogger(__name__) TupleVariation = tv.TupleVariation # https://www.microsoft.com/typography/otspec/gvar.htm # https://www.microsoft.com/typography/otspec/otvarcommonformats.htm # # Apple's documentation of 'gvar': # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html # # FreeType2 source code for parsing 'gvar': # http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c GVAR_HEADER_FORMAT = """ > # big endian version: H reserved: H axisCount: H sharedTupleCount: H offsetToSharedTuples: I glyphCount: H flags: H offsetToGlyphVariationData: I """ GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT) class _LazyDict(UserDict): def __init__(self, data): super().__init__() self.data = data def __getitem__(self, k): v = self.data[k] if callable(v): v = v() self.data[k] = v return v class table__g_v_a_r(DefaultTable.DefaultTable): dependencies = ["fvar", "glyf"] def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.version, self.reserved = 1, 0 self.variations = {} def compile(self, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] sharedTuples = tv.compileSharedTuples( axisTags, itertools.chain(*self.variations.values()) ) sharedTupleIndices = {coord: i for i, coord in enumerate(sharedTuples)} sharedTupleSize = sum([len(c) for c in sharedTuples]) compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedTupleIndices) offset = 0 offsets = [] for glyph in compiledGlyphs: offsets.append(offset) offset += len(glyph) offsets.append(offset) compiledOffsets, tableFormat = self.compileOffsets_(offsets) header = {} header["version"] = self.version header["reserved"] = self.reserved header["axisCount"] = len(axisTags) header["sharedTupleCount"] = len(sharedTuples) header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets) header["glyphCount"] = len(compiledGlyphs) header["flags"] = tableFormat header["offsetToGlyphVariationData"] = ( header["offsetToSharedTuples"] + sharedTupleSize ) compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header) result = [compiledHeader, compiledOffsets] result.extend(sharedTuples) result.extend(compiledGlyphs) return b"".join(result) def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices): result = [] glyf = ttFont["glyf"] for glyphName in 
ttFont.getGlyphOrder(): variations = self.variations.get(glyphName, []) if not variations: result.append(b"") continue pointCountUnused = 0 # pointCount is actually unused by compileGlyph result.append( compileGlyph_( variations, pointCountUnused, axisTags, sharedCoordIndices ) ) return result def decompile(self, data, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] glyphs = ttFont.getGlyphOrder() sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self) assert len(glyphs) == self.glyphCount assert len(axisTags) == self.axisCount offsets = self.decompileOffsets_( data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount, ) sharedCoords = tv.decompileSharedTuples( axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples ) variations = {} offsetToData = self.offsetToGlyphVariationData glyf = ttFont["glyf"] def decompileVarGlyph(glyphName, gid): gvarData = data[ offsetToData + offsets[gid] : offsetToData + offsets[gid + 1] ] if not gvarData: return [] glyph = glyf[glyphName] numPointsInGlyph = self.getNumPoints_(glyph) return decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData) for gid in range(self.glyphCount): glyphName = glyphs[gid] variations[glyphName] = partial(decompileVarGlyph, glyphName, gid) self.variations = _LazyDict(variations) if ttFont.lazy is False: # Be lazy for None and True self.ensureDecompiled() def ensureDecompiled(self, recurse=False): # The recurse argument is unused, but part of the signature of # ensureDecompiled across the library. # Use a zero-length deque to consume the lazy dict deque(self.variations.values(), maxlen=0) @staticmethod def decompileOffsets_(data, tableFormat, glyphCount): if tableFormat == 0: # Short format: array of UInt16 offsets = array.array("H") offsetsSize = (glyphCount + 1) * 2 else: # Long format: array of UInt32 offsets = array.array("I") offsetsSize = (glyphCount + 1) * 4 offsets.frombytes(data[0:offsetsSize]) if sys.byteorder != "big": offsets.byteswap() # In the short format, offsets need to be multiplied by 2. # This is not documented in Apple's TrueType specification, # but can be inferred from the FreeType implementation, and # we could verify it with two sample GX fonts. if tableFormat == 0: offsets = [off * 2 for off in offsets] return offsets @staticmethod def compileOffsets_(offsets): """Packs a list of offsets into a 'gvar' offset table. Returns a pair (bytestring, tableFormat). Bytestring is the packed offset table. Format indicates whether the table uses short (tableFormat=0) or long (tableFormat=1) integers. The returned tableFormat should get packed into the flags field of the 'gvar' header. 
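        Example (short format: each offset is halved and stored as a
        big-endian uint16):

        >>> packed, tableFormat = table__g_v_a_r.compileOffsets_([0, 4, 8])
        >>> tableFormat
        0
        >>> packed
        b'\x00\x00\x00\x02\x00\x04'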
""" assert len(offsets) >= 2 for i in range(1, len(offsets)): assert offsets[i - 1] <= offsets[i] if max(offsets) <= 0xFFFF * 2: packed = array.array("H", [n >> 1 for n in offsets]) tableFormat = 0 else: packed = array.array("I", offsets) tableFormat = 1 if sys.byteorder != "big": packed.byteswap() return (packed.tobytes(), tableFormat) def toXML(self, writer, ttFont): writer.simpletag("version", value=self.version) writer.newline() writer.simpletag("reserved", value=self.reserved) writer.newline() axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] for glyphName in ttFont.getGlyphNames(): variations = self.variations.get(glyphName) if not variations: continue writer.begintag("glyphVariations", glyph=glyphName) writer.newline() for gvar in variations: gvar.toXML(writer, axisTags) writer.endtag("glyphVariations") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "version": self.version = safeEval(attrs["value"]) elif name == "reserved": self.reserved = safeEval(attrs["value"]) elif name == "glyphVariations": if not hasattr(self, "variations"): self.variations = {} glyphName = attrs["glyph"] glyph = ttFont["glyf"][glyphName] numPointsInGlyph = self.getNumPoints_(glyph) glyphVariations = [] for element in content: if isinstance(element, tuple): name, attrs, content = element if name == "tuple": gvar = TupleVariation({}, [None] * numPointsInGlyph) glyphVariations.append(gvar) for tupleElement in content: if isinstance(tupleElement, tuple): tupleName, tupleAttrs, tupleContent = tupleElement gvar.fromXML(tupleName, tupleAttrs, tupleContent) self.variations[glyphName] = glyphVariations @staticmethod def getNumPoints_(glyph): NUM_PHANTOM_POINTS = 4 if glyph.isComposite(): return len(glyph.components) + NUM_PHANTOM_POINTS elif glyph.isVarComposite(): count = 0 for component in glyph.components: count += component.getPointCount() return count + NUM_PHANTOM_POINTS else: # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute. return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices): tupleVariationCount, tuples, data = tv.compileTupleVariationStore( variations, pointCount, axisTags, sharedCoordIndices ) if tupleVariationCount == 0: return b"" result = [struct.pack(">HH", tupleVariationCount, 4 + len(tuples)), tuples, data] if (len(tuples) + len(data)) % 2 != 0: result.append(b"\0") # padding return b"".join(result) def decompileGlyph_(pointCount, sharedTuples, axisTags, data): if len(data) < 4: return [] tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4]) dataPos = offsetToData return tv.decompileTupleVariationStore( "gvar", axisTags, tupleVariationCount, pointCount, sharedTuples, data, 4, offsetToData, ) PKaZZZe��-��"fontTools/ttLib/tables/_h_d_m_x.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import bytechr, byteord, strjoin from . import DefaultTable import array from collections.abc import Mapping hdmxHeaderFormat = """ > # big endian! 
version: H numRecords: H recordSize: l """ class _GlyphnamedList(Mapping): def __init__(self, reverseGlyphOrder, data): self._array = data self._map = dict(reverseGlyphOrder) def __getitem__(self, k): return self._array[self._map[k]] def __len__(self): return len(self._map) def __iter__(self): return iter(self._map) def keys(self): return self._map.keys() class table__h_d_m_x(DefaultTable.DefaultTable): def decompile(self, data, ttFont): numGlyphs = ttFont["maxp"].numGlyphs glyphOrder = ttFont.getGlyphOrder() dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self) self.hdmx = {} for i in range(self.numRecords): ppem = byteord(data[0]) maxSize = byteord(data[1]) widths = _GlyphnamedList( ttFont.getReverseGlyphMap(), array.array("B", data[2 : 2 + numGlyphs]) ) self.hdmx[ppem] = widths data = data[self.recordSize :] assert len(data) == 0, "too much hdmx data" def compile(self, ttFont): self.version = 0 numGlyphs = ttFont["maxp"].numGlyphs glyphOrder = ttFont.getGlyphOrder() self.recordSize = 4 * ((2 + numGlyphs + 3) // 4) pad = (self.recordSize - 2 - numGlyphs) * b"\0" self.numRecords = len(self.hdmx) data = sstruct.pack(hdmxHeaderFormat, self) items = sorted(self.hdmx.items()) for ppem, widths in items: data = data + bytechr(ppem) + bytechr(max(widths.values())) for glyphID in range(len(glyphOrder)): width = widths[glyphOrder[glyphID]] data = data + bytechr(width) data = data + pad return data def toXML(self, writer, ttFont): writer.begintag("hdmxData") writer.newline() ppems = sorted(self.hdmx.keys()) records = [] format = "" for ppem in ppems: widths = self.hdmx[ppem] records.append(widths) format = format + "%4d" glyphNames = ttFont.getGlyphOrder()[:] glyphNames.sort() maxNameLen = max(map(len, glyphNames)) format = "%" + repr(maxNameLen) + "s:" + format + " ;" writer.write(format % (("ppem",) + tuple(ppems))) writer.newline() writer.newline() for glyphName in glyphNames: row = [] for ppem in ppems: widths = self.hdmx[ppem] row.append(widths[glyphName]) if ";" in glyphName: glyphName = "\\x3b".join(glyphName.split(";")) writer.write(format % ((glyphName,) + tuple(row))) writer.newline() writer.endtag("hdmxData") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name != "hdmxData": return content = strjoin(content) lines = content.split(";") topRow = lines[0].split() assert topRow[0] == "ppem:", "illegal hdmx format" ppems = list(map(int, topRow[1:])) self.hdmx = hdmx = {} for ppem in ppems: hdmx[ppem] = {} lines = (line.split() for line in lines[1:]) for line in lines: if not line: continue assert line[0][-1] == ":", "illegal hdmx format" glyphName = line[0][:-1] if "\\" in glyphName: from fontTools.misc.textTools import safeEval glyphName = safeEval('"""' + glyphName + '"""') line = list(map(int, line[1:])) assert len(line) == len(ppems), "illegal hdmx format" for i in range(len(ppems)): hdmx[ppems[i]][glyphName] = line[i] PKaZZZ���ȉ�"fontTools/ttLib/tables/_h_e_a_d.pyfrom fontTools.misc import sstruct from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat from fontTools.misc.textTools import safeEval, num2binary, binary2num from fontTools.misc.timeTools import ( timestampFromString, timestampToString, timestampNow, ) from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat from fontTools.misc.arrayTools import intRect, unionRect from . 
import DefaultTable import logging log = logging.getLogger(__name__) headFormat = """ > # big endian tableVersion: 16.16F fontRevision: 16.16F checkSumAdjustment: I magicNumber: I flags: H unitsPerEm: H created: Q modified: Q xMin: h yMin: h xMax: h yMax: h macStyle: H lowestRecPPEM: H fontDirectionHint: h indexToLocFormat: h glyphDataFormat: h """ class table__h_e_a_d(DefaultTable.DefaultTable): dependencies = ["maxp", "loca", "CFF ", "CFF2"] def decompile(self, data, ttFont): dummy, rest = sstruct.unpack2(headFormat, data, self) if rest: # this is quite illegal, but there seem to be fonts out there that do this log.warning("extra bytes at the end of 'head' table") assert rest == b"\0\0" # For timestamp fields, ignore the top four bytes. Some fonts have # bogus values there. Since till 2038 those bytes only can be zero, # ignore them. # # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810 for stamp in "created", "modified": value = getattr(self, stamp) if value > 0xFFFFFFFF: log.warning("'%s' timestamp out of range; ignoring top bytes", stamp) value &= 0xFFFFFFFF setattr(self, stamp, value) if value < 0x7C259DC0: # January 1, 1970 00:00:00 log.warning( "'%s' timestamp seems very low; regarding as unix timestamp", stamp ) value += 0x7C259DC0 setattr(self, stamp, value) def compile(self, ttFont): if ttFont.recalcBBoxes: # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc(). if "CFF " in ttFont: topDict = ttFont["CFF "].cff.topDictIndex[0] self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox) elif "CFF2" in ttFont: topDict = ttFont["CFF2"].cff.topDictIndex[0] charStrings = topDict.CharStrings fontBBox = None for charString in charStrings.values(): bounds = charString.calcBounds(charStrings) if bounds is not None: if fontBBox is not None: fontBBox = unionRect(fontBBox, bounds) else: fontBBox = bounds if fontBBox is not None: self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox) if ttFont.recalcTimestamp: self.modified = timestampNow() data = sstruct.pack(headFormat, self) return data def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") writer.newline() _, names, fixes = sstruct.getformat(headFormat) for name in names: value = getattr(self, name) if name in fixes: value = floatToFixedToStr(value, precisionBits=fixes[name]) elif name in ("created", "modified"): value = timestampToString(value) elif name in ("magicNumber", "checkSumAdjustment"): if value < 0: value = value + 0x100000000 value = hex(value) if value[-1:] == "L": value = value[:-1] elif name in ("macStyle", "flags"): value = num2binary(value, 16) writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] fixes = sstruct.getformat(headFormat)[2] if name in fixes: value = strToFixedToFloat(value, precisionBits=fixes[name]) elif name in ("created", "modified"): value = timestampFromString(value) elif name in ("macStyle", "flags"): value = binary2num(value) else: value = safeEval(value) setattr(self, name, value) PKaZZZ��RR"fontTools/ttLib/tables/_h_h_e_a.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from fontTools.misc.fixedTools import ( ensureVersionIsLong as fi2ve, versionToFixed as ve2fi, ) from . 
import DefaultTable import math hheaFormat = """ > # big endian tableVersion: L ascent: h descent: h lineGap: h advanceWidthMax: H minLeftSideBearing: h minRightSideBearing: h xMaxExtent: h caretSlopeRise: h caretSlopeRun: h caretOffset: h reserved0: h reserved1: h reserved2: h reserved3: h metricDataFormat: h numberOfHMetrics: H """ class table__h_h_e_a(DefaultTable.DefaultTable): # Note: Keep in sync with table__v_h_e_a dependencies = ["hmtx", "glyf", "CFF ", "CFF2"] # OpenType spec renamed these, add aliases for compatibility @property def ascender(self): return self.ascent @ascender.setter def ascender(self, value): self.ascent = value @property def descender(self): return self.descent @descender.setter def descender(self, value): self.descent = value def decompile(self, data, ttFont): sstruct.unpack(hheaFormat, data, self) def compile(self, ttFont): if ttFont.recalcBBoxes and ( ttFont.isLoaded("glyf") or ttFont.isLoaded("CFF ") or ttFont.isLoaded("CFF2") ): self.recalc(ttFont) self.tableVersion = fi2ve(self.tableVersion) return sstruct.pack(hheaFormat, self) def recalc(self, ttFont): if "hmtx" not in ttFont: return hmtxTable = ttFont["hmtx"] self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values()) boundsWidthDict = {} if "glyf" in ttFont: glyfTable = ttFont["glyf"] for name in ttFont.getGlyphOrder(): g = glyfTable[name] if g.numberOfContours == 0: continue if g.numberOfContours < 0 and not hasattr(g, "xMax"): # Composite glyph without extents set. # Calculate those. g.recalcBounds(glyfTable) boundsWidthDict[name] = g.xMax - g.xMin elif "CFF " in ttFont or "CFF2" in ttFont: if "CFF " in ttFont: topDict = ttFont["CFF "].cff.topDictIndex[0] else: topDict = ttFont["CFF2"].cff.topDictIndex[0] charStrings = topDict.CharStrings for name in ttFont.getGlyphOrder(): cs = charStrings[name] bounds = cs.calcBounds(charStrings) if bounds is not None: boundsWidthDict[name] = int( math.ceil(bounds[2]) - math.floor(bounds[0]) ) if boundsWidthDict: minLeftSideBearing = float("inf") minRightSideBearing = float("inf") xMaxExtent = -float("inf") for name, boundsWidth in boundsWidthDict.items(): advanceWidth, lsb = hmtxTable[name] rsb = advanceWidth - lsb - boundsWidth extent = lsb + boundsWidth minLeftSideBearing = min(minLeftSideBearing, lsb) minRightSideBearing = min(minRightSideBearing, rsb) xMaxExtent = max(xMaxExtent, extent) self.minLeftSideBearing = minLeftSideBearing self.minRightSideBearing = minRightSideBearing self.xMaxExtent = xMaxExtent else: # No glyph has outlines. self.minLeftSideBearing = 0 self.minRightSideBearing = 0 self.xMaxExtent = 0 def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(hheaFormat) for name in names: value = getattr(self, name) if name == "tableVersion": value = fi2ve(value) value = "0x%08x" % value writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "tableVersion": setattr(self, name, ve2fi(attrs["value"])) return setattr(self, name, safeEval(attrs["value"])) PKaZZZ�yB��"fontTools/ttLib/tables/_h_m_t_x.pyfrom fontTools.misc.roundTools import otRound from fontTools import ttLib from fontTools.misc.textTools import safeEval from . 
import DefaultTable import sys import struct import array import logging log = logging.getLogger(__name__) class table__h_m_t_x(DefaultTable.DefaultTable): headerTag = "hhea" advanceName = "width" sideBearingName = "lsb" numberOfMetricsName = "numberOfHMetrics" longMetricFormat = "Hh" def decompile(self, data, ttFont): numGlyphs = ttFont["maxp"].numGlyphs headerTable = ttFont.get(self.headerTag) if headerTable is not None: numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName)) else: numberOfMetrics = numGlyphs if numberOfMetrics > numGlyphs: log.warning( "The %s.%s exceeds the maxp.numGlyphs" % (self.headerTag, self.numberOfMetricsName) ) numberOfMetrics = numGlyphs if len(data) < 4 * numberOfMetrics: raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag) # Note: advanceWidth is unsigned, but some font editors might # read/write as signed. We can't be sure whether it was a mistake # or not, so we read as unsigned but also issue a warning... metricsFmt = ">" + self.longMetricFormat * numberOfMetrics metrics = struct.unpack(metricsFmt, data[: 4 * numberOfMetrics]) data = data[4 * numberOfMetrics :] numberOfSideBearings = numGlyphs - numberOfMetrics sideBearings = array.array("h", data[: 2 * numberOfSideBearings]) data = data[2 * numberOfSideBearings :] if sys.byteorder != "big": sideBearings.byteswap() if data: log.warning("too much '%s' table data" % self.tableTag) self.metrics = {} glyphOrder = ttFont.getGlyphOrder() for i in range(numberOfMetrics): glyphName = glyphOrder[i] advanceWidth, lsb = metrics[i * 2 : i * 2 + 2] if advanceWidth > 32767: log.warning( "Glyph %r has a huge advance %s (%d); is it intentional or " "an (invalid) negative value?", glyphName, self.advanceName, advanceWidth, ) self.metrics[glyphName] = (advanceWidth, lsb) lastAdvance = metrics[-2] for i in range(numberOfSideBearings): glyphName = glyphOrder[i + numberOfMetrics] self.metrics[glyphName] = (lastAdvance, sideBearings[i]) def compile(self, ttFont): metrics = [] hasNegativeAdvances = False for glyphName in ttFont.getGlyphOrder(): advanceWidth, sideBearing = self.metrics[glyphName] if advanceWidth < 0: log.error( "Glyph %r has negative advance %s" % (glyphName, self.advanceName) ) hasNegativeAdvances = True metrics.append([advanceWidth, sideBearing]) headerTable = ttFont.get(self.headerTag) if headerTable is not None: lastAdvance = metrics[-1][0] lastIndex = len(metrics) while metrics[lastIndex - 2][0] == lastAdvance: lastIndex -= 1 if lastIndex <= 1: # all advances are equal lastIndex = 1 break additionalMetrics = metrics[lastIndex:] additionalMetrics = [otRound(sb) for _, sb in additionalMetrics] metrics = metrics[:lastIndex] numberOfMetrics = len(metrics) setattr(headerTable, self.numberOfMetricsName, numberOfMetrics) else: # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs numberOfMetrics = ttFont["maxp"].numGlyphs additionalMetrics = [] allMetrics = [] for advance, sb in metrics: allMetrics.extend([otRound(advance), otRound(sb)]) metricsFmt = ">" + self.longMetricFormat * numberOfMetrics try: data = struct.pack(metricsFmt, *allMetrics) except struct.error as e: if "out of range" in str(e) and hasNegativeAdvances: raise ttLib.TTLibError( "'%s' table can't contain negative advance %ss" % (self.tableTag, self.advanceName) ) else: raise additionalMetrics = array.array("h", additionalMetrics) if sys.byteorder != "big": additionalMetrics.byteswap() data = data + additionalMetrics.tobytes() return data def toXML(self, writer, ttFont): names = sorted(self.metrics.keys()) 
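# --- Illustrative sketch (not part of fontTools) ---------------------------
# Mirrors the advance run-trimming in compile() above: trailing glyphs whose
# advance equals the last advance store only a side bearing, shrinking
# numberOfHMetrics. The example metrics are made-up data.
def _split_metrics(metrics):
    lastAdvance = metrics[-1][0]
    lastIndex = len(metrics)
    while lastIndex > 1 and metrics[lastIndex - 2][0] == lastAdvance:
        lastIndex -= 1
    return metrics[:lastIndex], [sb for _, sb in metrics[lastIndex:]]

# _split_metrics([(600, 50), (600, 40), (600, 30)])
# -> ([(600, 50)], [40, 30])   i.e. numberOfHMetrics == 1
# ---------------------------------------------------------------------------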
for glyphName in names: advance, sb = self.metrics[glyphName] writer.simpletag( "mtx", [ ("name", glyphName), (self.advanceName, advance), (self.sideBearingName, sb), ], ) writer.newline() def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "metrics"): self.metrics = {} if name == "mtx": self.metrics[attrs["name"]] = ( safeEval(attrs[self.advanceName]), safeEval(attrs[self.sideBearingName]), ) def __delitem__(self, glyphName): del self.metrics[glyphName] def __getitem__(self, glyphName): return self.metrics[glyphName] def __setitem__(self, glyphName, advance_sb_pair): self.metrics[glyphName] = tuple(advance_sb_pair) PKaZZZ��V��(�("fontTools/ttLib/tables/_k_e_r_n.pyfrom fontTools.ttLib import getSearchRange from fontTools.misc.textTools import safeEval, readHex from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi from . import DefaultTable import struct import sys import array import logging log = logging.getLogger(__name__) class table__k_e_r_n(DefaultTable.DefaultTable): def getkern(self, format): for subtable in self.kernTables: if subtable.format == format: return subtable return None # not found def decompile(self, data, ttFont): version, nTables = struct.unpack(">HH", data[:4]) apple = False if (len(data) >= 8) and (version == 1): # AAT Apple's "new" format. Hm. version, nTables = struct.unpack(">LL", data[:8]) self.version = fi2fl(version, 16) data = data[8:] apple = True else: self.version = version data = data[4:] self.kernTables = [] for i in range(nTables): if self.version == 1.0: # Apple length, coverage, subtableFormat = struct.unpack(">LBB", data[:6]) else: # in OpenType spec the "version" field refers to the common # subtable header; the actual subtable format is stored in # the 8-15 mask bits of "coverage" field. # This "version" is always 0 so we ignore it here _, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6]) if nTables == 1 and subtableFormat == 0: # The "length" value is ignored since some fonts # (like OpenSans and Calibri) have a subtable larger than # its value. (nPairs,) = struct.unpack(">H", data[6:8]) calculated_length = (nPairs * 6) + 14 if length != calculated_length: log.warning( "'kern' subtable longer than defined: " "%d bytes instead of %d bytes" % (calculated_length, length) ) length = calculated_length if subtableFormat not in kern_classes: subtable = KernTable_format_unkown(subtableFormat) else: subtable = kern_classes[subtableFormat](apple) subtable.decompile(data[:length], ttFont) self.kernTables.append(subtable) data = data[length:] def compile(self, ttFont): if hasattr(self, "kernTables"): nTables = len(self.kernTables) else: nTables = 0 if self.version == 1.0: # AAT Apple's "new" format. 
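# --- Illustrative sketch (not part of fontTools) ---------------------------
# One format 0 pair record is (left GID, right GID, signed value), as read by
# KernTable_format_0.decompile() below.
import struct as _struct

def _read_kern_pair(data, offset=0):
    return _struct.unpack(">HHh", data[offset : offset + 6])

# _read_kern_pair(b"\x00\x24\x00\x29\xff\xc4") -> (36, 41, -60)
# ---------------------------------------------------------------------------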
data = struct.pack(">LL", fl2fi(self.version, 16), nTables) else: data = struct.pack(">HH", self.version, nTables) if hasattr(self, "kernTables"): for subtable in self.kernTables: data = data + subtable.compile(ttFont) return data def toXML(self, writer, ttFont): writer.simpletag("version", value=self.version) writer.newline() for subtable in self.kernTables: subtable.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if name == "version": self.version = safeEval(attrs["value"]) return if name != "kernsubtable": return if not hasattr(self, "kernTables"): self.kernTables = [] format = safeEval(attrs["format"]) if format not in kern_classes: subtable = KernTable_format_unkown(format) else: apple = self.version == 1.0 subtable = kern_classes[format](apple) self.kernTables.append(subtable) subtable.fromXML(name, attrs, content, ttFont) class KernTable_format_0(object): # 'version' is kept for backward compatibility version = format = 0 def __init__(self, apple=False): self.apple = apple def decompile(self, data, ttFont): if not self.apple: version, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6]) if version != 0: from fontTools.ttLib import TTLibError raise TTLibError("unsupported kern subtable version: %d" % version) tupleIndex = None # Should we also assert length == len(data)? data = data[6:] else: length, coverage, subtableFormat, tupleIndex = struct.unpack( ">LBBH", data[:8] ) data = data[8:] assert self.format == subtableFormat, "unsupported format" self.coverage = coverage self.tupleIndex = tupleIndex self.kernTable = kernTable = {} nPairs, searchRange, entrySelector, rangeShift = struct.unpack( ">HHHH", data[:8] ) data = data[8:] datas = array.array("H", data[: 6 * nPairs]) if sys.byteorder != "big": datas.byteswap() it = iter(datas) glyphOrder = ttFont.getGlyphOrder() for k in range(nPairs): left, right, value = next(it), next(it), next(it) if value >= 32768: value -= 65536 try: kernTable[(glyphOrder[left], glyphOrder[right])] = value except IndexError: # Slower, but will not throw an IndexError on an invalid # glyph id. kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = ( value ) if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess log.warning( "excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs ) def compile(self, ttFont): nPairs = min(len(self.kernTable), 0xFFFF) searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6) searchRange &= 0xFFFF entrySelector = min(entrySelector, 0xFFFF) rangeShift = min(rangeShift, 0xFFFF) data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift) # yeehee! (I mean, turn names into indices) try: reverseOrder = ttFont.getReverseGlyphMap() kernTable = sorted( (reverseOrder[left], reverseOrder[right], value) for ((left, right), value) in self.kernTable.items() ) except KeyError: # Slower, but will not throw KeyError on invalid glyph id. getGlyphID = ttFont.getGlyphID kernTable = sorted( (getGlyphID(left), getGlyphID(right), value) for ((left, right), value) in self.kernTable.items() ) for left, right, value in kernTable: data = data + struct.pack(">HHh", left, right, value) if not self.apple: version = 0 length = len(data) + 6 if length >= 0x10000: log.warning( '"kern" subtable overflow, ' "truncating length value while preserving pairs." 
) length &= 0xFFFF header = struct.pack(">HHBB", version, length, self.format, self.coverage) else: if self.tupleIndex is None: # sensible default when compiling a TTX from an old fonttools # or when inserting a Windows-style format 0 subtable into an # Apple version=1.0 kern table log.warning("'tupleIndex' is None; default to 0") self.tupleIndex = 0 length = len(data) + 8 header = struct.pack( ">LBBH", length, self.coverage, self.format, self.tupleIndex ) return header + data def toXML(self, writer, ttFont): attrs = dict(coverage=self.coverage, format=self.format) if self.apple: if self.tupleIndex is None: log.warning("'tupleIndex' is None; default to 0") attrs["tupleIndex"] = 0 else: attrs["tupleIndex"] = self.tupleIndex writer.begintag("kernsubtable", **attrs) writer.newline() items = sorted(self.kernTable.items()) for (left, right), value in items: writer.simpletag("pair", [("l", left), ("r", right), ("v", value)]) writer.newline() writer.endtag("kernsubtable") writer.newline() def fromXML(self, name, attrs, content, ttFont): self.coverage = safeEval(attrs["coverage"]) subtableFormat = safeEval(attrs["format"]) if self.apple: if "tupleIndex" in attrs: self.tupleIndex = safeEval(attrs["tupleIndex"]) else: # previous fontTools versions didn't export tupleIndex log.warning("Apple kern subtable is missing 'tupleIndex' attribute") self.tupleIndex = None else: self.tupleIndex = None assert subtableFormat == self.format, "unsupported format" if not hasattr(self, "kernTable"): self.kernTable = {} for element in content: if not isinstance(element, tuple): continue name, attrs, content = element self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"]) def __getitem__(self, pair): return self.kernTable[pair] def __setitem__(self, pair, value): self.kernTable[pair] = value def __delitem__(self, pair): del self.kernTable[pair] class KernTable_format_unkown(object): def __init__(self, format): self.format = format def decompile(self, data, ttFont): self.data = data def compile(self, ttFont): return self.data def toXML(self, writer, ttFont): writer.begintag("kernsubtable", format=self.format) writer.newline() writer.comment("unknown 'kern' subtable format") writer.newline() writer.dumphex(self.data) writer.endtag("kernsubtable") writer.newline() def fromXML(self, name, attrs, content, ttFont): self.decompile(readHex(content), ttFont) kern_classes = {0: KernTable_format_0} PKaZZZ��?XX"fontTools/ttLib/tables/_l_c_a_r.pyfrom .otBase import BaseTTXConverter class table__l_c_a_r(BaseTTXConverter): pass PKaZZZ9Nޕ��"fontTools/ttLib/tables/_l_o_c_a.pyfrom . 
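# Usage sketch for the 'kern' table implementation above (hedged;
# "MyFont.ttf" is a hypothetical path, not part of the library).
# getkern(0) fetches the first format-0 subtable; its kernTable maps
# (left, right) glyph-name pairs to FUnit adjustments, and the pair-access
# dunders defined above allow direct indexing:
#
# >>> from fontTools.ttLib import TTFont
# >>> font = TTFont("MyFont.ttf")
# >>> subtable = font["kern"].getkern(0)
# >>> subtable[("V", "A")] = -80      # KernTable_format_0.__setitem__
# >>> font.save("MyFont-kerned.ttf")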
import DefaultTable import sys import array import logging log = logging.getLogger(__name__) class table__l_o_c_a(DefaultTable.DefaultTable): dependencies = ["glyf"] def decompile(self, data, ttFont): longFormat = ttFont["head"].indexToLocFormat if longFormat: format = "I" else: format = "H" locations = array.array(format) locations.frombytes(data) if sys.byteorder != "big": locations.byteswap() if not longFormat: l = array.array("I") for i in range(len(locations)): l.append(locations[i] * 2) locations = l if len(locations) < (ttFont["maxp"].numGlyphs + 1): log.warning( "corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d", len(locations) - 1, ttFont["maxp"].numGlyphs, ) self.locations = locations def compile(self, ttFont): try: max_location = max(self.locations) except AttributeError: self.set([]) max_location = 0 if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations): locations = array.array("H") for i in range(len(self.locations)): locations.append(self.locations[i] // 2) ttFont["head"].indexToLocFormat = 0 else: locations = array.array("I", self.locations) ttFont["head"].indexToLocFormat = 1 if sys.byteorder != "big": locations.byteswap() return locations.tobytes() def set(self, locations): self.locations = array.array("I", locations) def toXML(self, writer, ttFont): writer.comment("The 'loca' table will be calculated by the compiler") writer.newline() def __getitem__(self, index): return self.locations[index] def __len__(self): return len(self.locations) PKaZZZY)���"fontTools/ttLib/tables/_l_t_a_g.pyfrom fontTools.misc.textTools import bytesjoin, tobytes, safeEval from . import DefaultTable import struct # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html class table__l_t_a_g(DefaultTable.DefaultTable): def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.version, self.flags = 1, 0 self.tags = [] def addTag(self, tag): """Add 'tag' to the list of langauge tags if not already there. Returns the integer index of 'tag' in the list of all tags. """ try: return self.tags.index(tag) except ValueError: self.tags.append(tag) return len(self.tags) - 1 def decompile(self, data, ttFont): self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) assert self.version == 1 self.tags = [] for i in range(numTags): pos = 12 + i * 4 offset, length = struct.unpack(">HH", data[pos : pos + 4]) tag = data[offset : offset + length].decode("ascii") self.tags.append(tag) def compile(self, ttFont): dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))] stringPool = "" for tag in self.tags: offset = stringPool.find(tag) if offset < 0: offset = len(stringPool) stringPool = stringPool + tag offset = offset + 12 + len(self.tags) * 4 dataList.append(struct.pack(">HH", offset, len(tag))) dataList.append(tobytes(stringPool)) return bytesjoin(dataList) def toXML(self, writer, ttFont): writer.simpletag("version", value=self.version) writer.newline() writer.simpletag("flags", value=self.flags) writer.newline() for tag in self.tags: writer.simpletag("LanguageTag", tag=tag) writer.newline() def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "tags"): self.tags = [] if name == "LanguageTag": self.tags.append(attrs["tag"]) elif "value" in attrs: value = safeEval(attrs["value"]) setattr(self, name, value) PKaZZZ�?����"fontTools/ttLib/tables/_m_a_x_p.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from . 
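# Usage sketch for the 'ltag' table above (hedged; "MyFont.ttf" is a
# hypothetical path). addTag() appends a BCP 47 tag only once and returns
# its stable index, which other tables (e.g. platformID-0 'name' records)
# can reference:
#
# >>> from fontTools.ttLib import TTFont, newTable
# >>> font = TTFont("MyFont.ttf")
# >>> ltag = font["ltag"] = newTable("ltag")
# >>> ltag.addTag("en")
# 0
# >>> ltag.addTag("de-CH")
# 1
# >>> ltag.addTag("en")               # re-adding reuses the existing index
# 0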
import DefaultTable maxpFormat_0_5 = """ > # big endian tableVersion: i numGlyphs: H """ maxpFormat_1_0_add = """ > # big endian maxPoints: H maxContours: H maxCompositePoints: H maxCompositeContours: H maxZones: H maxTwilightPoints: H maxStorage: H maxFunctionDefs: H maxInstructionDefs: H maxStackElements: H maxSizeOfInstructions: H maxComponentElements: H maxComponentDepth: H """ class table__m_a_x_p(DefaultTable.DefaultTable): dependencies = ["glyf"] def decompile(self, data, ttFont): dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self) self.numGlyphs = int(self.numGlyphs) if self.tableVersion != 0x00005000: dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self) assert len(data) == 0 def compile(self, ttFont): if "glyf" in ttFont: if ttFont.isLoaded("glyf") and ttFont.recalcBBoxes: self.recalc(ttFont) else: pass # CFF self.numGlyphs = len(ttFont.getGlyphOrder()) if self.tableVersion != 0x00005000: self.tableVersion = 0x00010000 data = sstruct.pack(maxpFormat_0_5, self) if self.tableVersion == 0x00010000: data = data + sstruct.pack(maxpFormat_1_0_add, self) return data def recalc(self, ttFont): """Recalculate the font bounding box, and most other maxp values except for the TT instructions values. Also recalculate the value of bit 1 of the flags field and the font bounding box of the 'head' table. """ glyfTable = ttFont["glyf"] hmtxTable = ttFont["hmtx"] headTable = ttFont["head"] self.numGlyphs = len(glyfTable) INFINITY = 100000 xMin = +INFINITY yMin = +INFINITY xMax = -INFINITY yMax = -INFINITY maxPoints = 0 maxContours = 0 maxCompositePoints = 0 maxCompositeContours = 0 maxComponentElements = 0 maxComponentDepth = 0 allXMinIsLsb = 1 for glyphName in ttFont.getGlyphOrder(): g = glyfTable[glyphName] if g.numberOfContours: if hmtxTable[glyphName][1] != g.xMin: allXMinIsLsb = 0 xMin = min(xMin, g.xMin) yMin = min(yMin, g.yMin) xMax = max(xMax, g.xMax) yMax = max(yMax, g.yMax) if g.numberOfContours > 0: nPoints, nContours = g.getMaxpValues() maxPoints = max(maxPoints, nPoints) maxContours = max(maxContours, nContours) elif g.isComposite(): nPoints, nContours, componentDepth = g.getCompositeMaxpValues( glyfTable ) maxCompositePoints = max(maxCompositePoints, nPoints) maxCompositeContours = max(maxCompositeContours, nContours) maxComponentElements = max(maxComponentElements, len(g.components)) maxComponentDepth = max(maxComponentDepth, componentDepth) if xMin == +INFINITY: headTable.xMin = 0 headTable.yMin = 0 headTable.xMax = 0 headTable.yMax = 0 else: headTable.xMin = xMin headTable.yMin = yMin headTable.xMax = xMax headTable.yMax = yMax self.maxPoints = maxPoints self.maxContours = maxContours self.maxCompositePoints = maxCompositePoints self.maxCompositeContours = maxCompositeContours self.maxComponentElements = maxComponentElements self.maxComponentDepth = maxComponentDepth if allXMinIsLsb: headTable.flags = headTable.flags | 0x2 else: headTable.flags = headTable.flags & ~0x2 def testrepr(self): items = sorted(self.__dict__.items()) print(". . . . . . . . .") for combo in items: print(" %s: %s" % combo) print(". . . . . . . . 
.") def toXML(self, writer, ttFont): if self.tableVersion != 0x00005000: writer.comment("Most of this table will be recalculated by the compiler") writer.newline() formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5) if self.tableVersion != 0x00005000: formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add) names = names + names_1_0 for name in names: value = getattr(self, name) if name == "tableVersion": value = hex(value) writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): setattr(self, name, safeEval(attrs["value"])) PKaZZZ\XF CC"fontTools/ttLib/tables/_m_e_t_a.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import bytesjoin, strjoin, readHex from fontTools.ttLib import TTLibError from . import DefaultTable # Apple's documentation of 'meta': # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6meta.html META_HEADER_FORMAT = """ > # big endian version: L flags: L dataOffset: L numDataMaps: L """ DATA_MAP_FORMAT = """ > # big endian tag: 4s dataOffset: L dataLength: L """ class table__m_e_t_a(DefaultTable.DefaultTable): def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.data = {} def decompile(self, data, ttFont): headerSize = sstruct.calcsize(META_HEADER_FORMAT) header = sstruct.unpack(META_HEADER_FORMAT, data[0:headerSize]) if header["version"] != 1: raise TTLibError("unsupported 'meta' version %d" % header["version"]) dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT) for i in range(header["numDataMaps"]): dataMapOffset = headerSize + i * dataMapSize dataMap = sstruct.unpack( DATA_MAP_FORMAT, data[dataMapOffset : dataMapOffset + dataMapSize] ) tag = dataMap["tag"] offset = dataMap["dataOffset"] self.data[tag] = data[offset : offset + dataMap["dataLength"]] if tag in ["dlng", "slng"]: self.data[tag] = self.data[tag].decode("utf-8") def compile(self, ttFont): keys = sorted(self.data.keys()) headerSize = sstruct.calcsize(META_HEADER_FORMAT) dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT) header = sstruct.pack( META_HEADER_FORMAT, { "version": 1, "flags": 0, "dataOffset": dataOffset, "numDataMaps": len(keys), }, ) dataMaps = [] dataBlocks = [] for tag in keys: if tag in ["dlng", "slng"]: data = self.data[tag].encode("utf-8") else: data = self.data[tag] dataMaps.append( sstruct.pack( DATA_MAP_FORMAT, {"tag": tag, "dataOffset": dataOffset, "dataLength": len(data)}, ) ) dataBlocks.append(data) dataOffset += len(data) return bytesjoin([header] + dataMaps + dataBlocks) def toXML(self, writer, ttFont): for tag in sorted(self.data.keys()): if tag in ["dlng", "slng"]: writer.begintag("text", tag=tag) writer.newline() writer.write(self.data[tag]) writer.newline() writer.endtag("text") writer.newline() else: writer.begintag("hexdata", tag=tag) writer.newline() data = self.data[tag] if min(data) >= 0x20 and max(data) <= 0x7E: writer.comment("ascii: " + data.decode("ascii")) writer.newline() writer.dumphex(data) writer.endtag("hexdata") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "hexdata": self.data[attrs["tag"]] = readHex(content) elif name == "text" and attrs["tag"] in ["dlng", "slng"]: self.data[attrs["tag"]] = strjoin(content).strip() else: raise TTLibError("can't handle '%s' element" % name) PKaZZZPӦȪ�"fontTools/ttLib/tables/_m_o_r_t.pyfrom .otBase import BaseTTXConverter # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html class table__m_o_r_t(BaseTTXConverter): pass 
PKaZZZ������"fontTools/ttLib/tables/_m_o_r_x.pyfrom .otBase import BaseTTXConverter # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html class table__m_o_r_x(BaseTTXConverter): pass PKaZZZ��r��"fontTools/ttLib/tables/_n_a_m_e.py# -*- coding: utf-8 -*- from fontTools.misc import sstruct from fontTools.misc.textTools import ( bytechr, byteord, bytesjoin, strjoin, tobytes, tostr, safeEval, ) from fontTools.misc.encodingTools import getEncoding from fontTools.ttLib import newTable from fontTools.ttLib.ttVisitor import TTVisitor from fontTools import ttLib import fontTools.ttLib.tables.otTables as otTables from fontTools.ttLib.tables import C_P_A_L_ from . import DefaultTable import struct import logging log = logging.getLogger(__name__) nameRecordFormat = """ > # big endian platformID: H platEncID: H langID: H nameID: H length: H offset: H """ nameRecordSize = sstruct.calcsize(nameRecordFormat) class table__n_a_m_e(DefaultTable.DefaultTable): dependencies = ["ltag"] def decompile(self, data, ttFont): format, n, stringOffset = struct.unpack(b">HHH", data[:6]) expectedStringOffset = 6 + n * nameRecordSize if stringOffset != expectedStringOffset: log.error( "'name' table stringOffset incorrect. Expected: %s; Actual: %s", expectedStringOffset, stringOffset, ) stringData = data[stringOffset:] data = data[6:] self.names = [] for i in range(n): if len(data) < 12: log.error("skipping malformed name record #%d", i) continue name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord()) name.string = stringData[name.offset : name.offset + name.length] if name.offset + name.length > len(stringData): log.error("skipping malformed name record #%d", i) continue assert len(name.string) == name.length # if (name.platEncID, name.platformID) in ((0, 0), (1, 3)): # if len(name.string) % 2: # print "2-byte string doesn't have even length!" 
# print name.__dict__ del name.offset, name.length self.names.append(name) def compile(self, ttFont): if not hasattr(self, "names"): # only happens when there are NO name table entries read # from the TTX file self.names = [] names = self.names names.sort() # sort according to the spec; see NameRecord.__lt__() stringData = b"" format = 0 n = len(names) stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat) data = struct.pack(b">HHH", format, n, stringOffset) lastoffset = 0 done = {} # remember the data so we can reuse the "pointers" for name in names: string = name.toBytes() if string in done: name.offset, name.length = done[string] else: name.offset, name.length = done[string] = len(stringData), len(string) stringData = bytesjoin([stringData, string]) data = data + sstruct.pack(nameRecordFormat, name) return data + stringData def toXML(self, writer, ttFont): for name in self.names: name.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if name != "namerecord": return # ignore unknown tags if not hasattr(self, "names"): self.names = [] name = NameRecord() self.names.append(name) name.fromXML(name, attrs, content, ttFont) def getName(self, nameID, platformID, platEncID, langID=None): for namerecord in self.names: if ( namerecord.nameID == nameID and namerecord.platformID == platformID and namerecord.platEncID == platEncID ): if langID is None or namerecord.langID == langID: return namerecord return None # not found def getDebugName(self, nameID): englishName = someName = None for name in self.names: if name.nameID != nameID: continue try: unistr = name.toUnicode() except UnicodeDecodeError: continue someName = unistr if (name.platformID, name.langID) in ((1, 0), (3, 0x409)): englishName = unistr break if englishName: return englishName elif someName: return someName else: return None def getFirstDebugName(self, nameIDs): for nameID in nameIDs: name = self.getDebugName(nameID) if name is not None: return name return None def getBestFamilyName(self): # 21 = WWS Family Name # 16 = Typographic Family Name # 1 = Family Name return self.getFirstDebugName((21, 16, 1)) def getBestSubFamilyName(self): # 22 = WWS SubFamily Name # 17 = Typographic SubFamily Name # 2 = SubFamily Name return self.getFirstDebugName((22, 17, 2)) def getBestFullName(self): # 4 = Full Name # 6 = PostScript Name for nameIDs in ((21, 22), (16, 17), (1, 2), (4,), (6,)): if len(nameIDs) == 2: name_fam = self.getDebugName(nameIDs[0]) name_subfam = self.getDebugName(nameIDs[1]) if None in [name_fam, name_subfam]: continue # if any is None, skip name = f"{name_fam} {name_subfam}" if name_subfam.lower() == "regular": name = f"{name_fam}" return name else: name = self.getDebugName(nameIDs[0]) if name is not None: return name return None def setName(self, string, nameID, platformID, platEncID, langID): """Set the 'string' for the name record identified by 'nameID', 'platformID', 'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it and append to the name table. 'string' can be of type `str` (`unicode` in PY2) or `bytes`. In the latter case, it is assumed to be already encoded with the correct plaform-specific encoding identified by the (platformID, platEncID, langID) triplet. A warning is issued to prevent unexpected results. 
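        Example (a hedged sketch; "MyFont.ttf" is a hypothetical path):

        >>> from fontTools.ttLib import TTFont
        >>> font = TTFont("MyFont.ttf")
        >>> font["name"].setName("My Family", 1, 3, 1, 0x409)
        >>> font["name"].getDebugName(1)
        'My Family'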
""" if not hasattr(self, "names"): self.names = [] if not isinstance(string, str): if isinstance(string, bytes): log.warning( "name string is bytes, ensure it's correctly encoded: %r", string ) else: raise TypeError( "expected unicode or bytes, found %s: %r" % (type(string).__name__, string) ) namerecord = self.getName(nameID, platformID, platEncID, langID) if namerecord: namerecord.string = string else: self.names.append(makeName(string, nameID, platformID, platEncID, langID)) def removeNames(self, nameID=None, platformID=None, platEncID=None, langID=None): """Remove any name records identified by the given combination of 'nameID', 'platformID', 'platEncID' and 'langID'. """ args = { argName: argValue for argName, argValue in ( ("nameID", nameID), ("platformID", platformID), ("platEncID", platEncID), ("langID", langID), ) if argValue is not None } if not args: # no arguments, nothing to do return self.names = [ rec for rec in self.names if any( argValue != getattr(rec, argName) for argName, argValue in args.items() ) ] @staticmethod def removeUnusedNames(ttFont): """Remove any name records which are not in NameID range 0-255 and not utilized within the font itself.""" visitor = NameRecordVisitor() visitor.visit(ttFont) toDelete = set() for record in ttFont["name"].names: # Name IDs 26 to 255, inclusive, are reserved for future standard names. # https://learn.microsoft.com/en-us/typography/opentype/spec/name#name-ids if record.nameID < 256: continue if record.nameID not in visitor.seen: toDelete.add(record.nameID) for nameID in toDelete: ttFont["name"].removeNames(nameID) return toDelete def _findUnusedNameID(self, minNameID=256): """Finds an unused name id. The nameID is assigned in the range between 'minNameID' and 32767 (inclusive), following the last nameID in the name table. """ names = getattr(self, "names", []) nameID = 1 + max([n.nameID for n in names] + [minNameID - 1]) if nameID > 32767: raise ValueError("nameID must be less than 32768") return nameID def findMultilingualName( self, names, windows=True, mac=True, minNameID=0, ttFont=None ): """Return the name ID of an existing multilingual name that matches the 'names' dictionary, or None if not found. 'names' is a dictionary with the name in multiple languages, such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}. The keys can be arbitrary IETF BCP 47 language codes; the values are Unicode strings. If 'windows' is True, the returned name ID is guaranteed exist for all requested languages for platformID=3 and platEncID=1. If 'mac' is True, the returned name ID is guaranteed to exist for all requested languages for platformID=1 and platEncID=0. The returned name ID will not be less than the 'minNameID' argument. 
""" # Gather the set of requested # (string, platformID, platEncID, langID) # tuples reqNameSet = set() for lang, name in sorted(names.items()): if windows: windowsName = _makeWindowsName(name, None, lang) if windowsName is not None: reqNameSet.add( ( windowsName.string, windowsName.platformID, windowsName.platEncID, windowsName.langID, ) ) if mac: macName = _makeMacName(name, None, lang, ttFont) if macName is not None: reqNameSet.add( ( macName.string, macName.platformID, macName.platEncID, macName.langID, ) ) # Collect matching name IDs matchingNames = dict() for name in self.names: try: key = (name.toUnicode(), name.platformID, name.platEncID, name.langID) except UnicodeDecodeError: continue if key in reqNameSet and name.nameID >= minNameID: nameSet = matchingNames.setdefault(name.nameID, set()) nameSet.add(key) # Return the first name ID that defines all requested strings for nameID, nameSet in sorted(matchingNames.items()): if nameSet == reqNameSet: return nameID return None # not found def addMultilingualName( self, names, ttFont=None, nameID=None, windows=True, mac=True, minNameID=0 ): """Add a multilingual name, returning its name ID 'names' is a dictionary with the name in multiple languages, such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}. The keys can be arbitrary IETF BCP 47 language codes; the values are Unicode strings. 'ttFont' is the TTFont to which the names are added, or None. If present, the font's 'ltag' table can get populated to store exotic language codes, which allows encoding names that otherwise cannot get encoded at all. 'nameID' is the name ID to be used, or None to let the library find an existing set of name records that match, or pick an unused name ID. If 'windows' is True, a platformID=3 name record will be added. If 'mac' is True, a platformID=1 name record will be added. If the 'nameID' argument is None, the created nameID will not be less than the 'minNameID' argument. """ if not hasattr(self, "names"): self.names = [] if nameID is None: # Reuse nameID if possible nameID = self.findMultilingualName( names, windows=windows, mac=mac, minNameID=minNameID, ttFont=ttFont ) if nameID is not None: return nameID nameID = self._findUnusedNameID() # TODO: Should minimize BCP 47 language codes. # https://github.com/fonttools/fonttools/issues/930 for lang, name in sorted(names.items()): if windows: windowsName = _makeWindowsName(name, nameID, lang) if windowsName is not None: self.names.append(windowsName) else: # We cannot not make a Windows name: make sure we add a # Mac name as a fallback. This can happen for exotic # BCP47 language tags that have no Windows language code. mac = True if mac: macName = _makeMacName(name, nameID, lang, ttFont) if macName is not None: self.names.append(macName) return nameID def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255): """Add a new name record containing 'string' for each (platformID, platEncID, langID) tuple specified in the 'platforms' list. The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive), following the last nameID in the name table. If no 'platforms' are specified, two English name records are added, one for the Macintosh (platformID=0), and one for the Windows platform (3). The 'string' must be a Unicode string, so it can be encoded with different, platform-specific encodings. Return the new nameID. 
""" assert ( len(platforms) > 0 ), "'platforms' must contain at least one (platformID, platEncID, langID) tuple" if not hasattr(self, "names"): self.names = [] if not isinstance(string, str): raise TypeError( "expected str, found %s: %r" % (type(string).__name__, string) ) nameID = self._findUnusedNameID(minNameID + 1) for platformID, platEncID, langID in platforms: self.names.append(makeName(string, nameID, platformID, platEncID, langID)) return nameID def makeName(string, nameID, platformID, platEncID, langID): name = NameRecord() name.string, name.nameID, name.platformID, name.platEncID, name.langID = ( string, nameID, platformID, platEncID, langID, ) return name def _makeWindowsName(name, nameID, language): """Create a NameRecord for the Microsoft Windows platform 'language' is an arbitrary IETF BCP 47 language identifier such as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows does not support the desired language, the result will be None. Future versions of fonttools might return a NameRecord for the OpenType 'name' table format 1, but this is not implemented yet. """ langID = _WINDOWS_LANGUAGE_CODES.get(language.lower()) if langID is not None: return makeName(name, nameID, 3, 1, langID) else: log.warning( "cannot add Windows name in language %s " "because fonttools does not yet support " "name table format 1" % language ) return None def _makeMacName(name, nameID, language, font=None): """Create a NameRecord for Apple platforms 'language' is an arbitrary IETF BCP 47 language identifier such as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we create a Macintosh NameRecord that is understood by old applications (platform ID 1 and an old-style Macintosh language enum). If this is not possible, we create a Unicode NameRecord (platform ID 0) whose language points to the font’s 'ltag' table. The latter can encode any string in any language, but legacy applications might not recognize the format (in which case they will ignore those names). 'font' should be the TTFont for which you want to create a name. If 'font' is None, we only return NameRecords for legacy Macintosh; in that case, the result will be None for names that need to be encoded with an 'ltag' table. See the section “The language identifier” in Apple’s specification: https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html """ macLang = _MAC_LANGUAGE_CODES.get(language.lower()) macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang) if macLang is not None and macScript is not None: encoding = getEncoding(1, macScript, macLang, default="ascii") # Check if we can actually encode this name. If we can't, # for example because we have no support for the legacy # encoding, or because the name string contains Unicode # characters that the legacy encoding cannot represent, # we fall back to encoding the name in Unicode and put # the language tag into the ltag table. 
try: _ = tobytes(name, encoding, errors="strict") return makeName(name, nameID, 1, macScript, macLang) except UnicodeEncodeError: pass if font is not None: ltag = font.tables.get("ltag") if ltag is None: ltag = font["ltag"] = newTable("ltag") # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)” # “The preferred platform-specific code for Unicode would be 3 or 4.” # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html return makeName(name, nameID, 0, 4, ltag.addTag(language)) else: log.warning( "cannot store language %s into 'ltag' table " "without having access to the TTFont object" % language ) return None class NameRecord(object): def getEncoding(self, default="ascii"): """Returns the Python encoding name for this name entry based on its platformID, platEncID, and langID. If encoding for these values is not known, by default 'ascii' is returned. That can be overriden by passing a value to the default argument. """ return getEncoding(self.platformID, self.platEncID, self.langID, default) def encodingIsUnicodeCompatible(self): return self.getEncoding(None) in ["utf_16_be", "ucs2be", "ascii", "latin1"] def __str__(self): return self.toStr(errors="backslashreplace") def isUnicode(self): return self.platformID == 0 or ( self.platformID == 3 and self.platEncID in [0, 1, 10] ) def toUnicode(self, errors="strict"): """ If self.string is a Unicode string, return it; otherwise try decoding the bytes in self.string to a Unicode string using the encoding of this entry as returned by self.getEncoding(); Note that self.getEncoding() returns 'ascii' if the encoding is unknown to the library. Certain heuristics are performed to recover data from bytes that are ill-formed in the chosen encoding, or that otherwise look misencoded (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE but marked otherwise). If the bytes are ill-formed and the heuristics fail, the error is handled according to the errors parameter to this function, which is passed to the underlying decode() function; by default it throws a UnicodeDecodeError exception. Note: The mentioned heuristics mean that roundtripping a font to XML and back to binary might recover some misencoded data whereas just loading the font and saving it back will not change them. """ def isascii(b): return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D] encoding = self.getEncoding() string = self.string if ( isinstance(string, bytes) and encoding == "utf_16_be" and len(string) % 2 == 1 ): # Recover badly encoded UTF-16 strings that have an odd number of bytes: # - If the last byte is zero, drop it. Otherwise, # - If all the odd bytes are zero and all the even bytes are ASCII, # prepend one zero byte. Otherwise, # - If first byte is zero and all other bytes are ASCII, insert zero # bytes between consecutive ASCII bytes. # # (Yes, I've seen all of these in the wild... sigh) if byteord(string[-1]) == 0: string = string[:-1] elif all( byteord(b) == 0 if i % 2 else isascii(byteord(b)) for i, b in enumerate(string) ): string = b"\0" + string elif byteord(string[0]) == 0 and all( isascii(byteord(b)) for b in string[1:] ): string = bytesjoin(b"\0" + bytechr(byteord(b)) for b in string[1:]) string = tostr(string, encoding=encoding, errors=errors) # If decoded strings still looks like UTF-16BE, it suggests a double-encoding. # Fix it up. 
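        # Illustrative case (an added note): a name stored as UTF-16BE bytes
        # b"\x00A\x00B" but decoded with a one-byte Mac encoding comes out as
        # "\x00A\x00B"; the NUL/ASCII alternation test below detects this,
        # and string[1::2] keeps only the real characters, giving "AB".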
if all( ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i, c in enumerate(string) ): # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text, # narrow it down. string = "".join(c for c in string[1::2]) return string def toBytes(self, errors="strict"): """If self.string is a bytes object, return it; otherwise try encoding the Unicode string in self.string to bytes using the encoding of this entry as returned by self.getEncoding(); Note that self.getEncoding() returns 'ascii' if the encoding is unknown to the library. If the Unicode string cannot be encoded to bytes in the chosen encoding, the error is handled according to the errors parameter to this function, which is passed to the underlying encode() function; by default it throws a UnicodeEncodeError exception. """ return tobytes(self.string, encoding=self.getEncoding(), errors=errors) toStr = toUnicode def toXML(self, writer, ttFont): try: unistr = self.toUnicode() except UnicodeDecodeError: unistr = None attrs = [ ("nameID", self.nameID), ("platformID", self.platformID), ("platEncID", self.platEncID), ("langID", hex(self.langID)), ] if unistr is None or not self.encodingIsUnicodeCompatible(): attrs.append(("unicode", unistr is not None)) writer.begintag("namerecord", attrs) writer.newline() if unistr is not None: writer.write(unistr) else: writer.write8bit(self.string) writer.newline() writer.endtag("namerecord") writer.newline() def fromXML(self, name, attrs, content, ttFont): self.nameID = safeEval(attrs["nameID"]) self.platformID = safeEval(attrs["platformID"]) self.platEncID = safeEval(attrs["platEncID"]) self.langID = safeEval(attrs["langID"]) s = strjoin(content).strip() encoding = self.getEncoding() if self.encodingIsUnicodeCompatible() or safeEval( attrs.get("unicode", "False") ): self.string = s.encode(encoding) else: # This is the inverse of write8bit... self.string = s.encode("latin1") def __lt__(self, other): if type(self) != type(other): return NotImplemented try: selfTuple = ( self.platformID, self.platEncID, self.langID, self.nameID, ) otherTuple = ( other.platformID, other.platEncID, other.langID, other.nameID, ) except AttributeError: # This can only happen for # 1) an object that is not a NameRecord, or # 2) an unlikely incomplete NameRecord object which has not been # fully populated return NotImplemented try: # Include the actual NameRecord string in the comparison tuples selfTuple = selfTuple + (self.toBytes(),) otherTuple = otherTuple + (other.toBytes(),) except UnicodeEncodeError as e: # toBytes caused an encoding error in either of the two, so content # to sorting based on IDs only log.error("NameRecord sorting failed to encode: %s" % e) # Implemented so that list.sort() sorts according to the spec by using # the order of the tuple items and their comparison return selfTuple < otherTuple def __repr__(self): return "<NameRecord NameID=%d; PlatformID=%d; LanguageID=%d>" % ( self.nameID, self.platformID, self.langID, ) # Windows language ID → IETF BCP-47 language tag # # While Microsoft indicates a region/country for all its language # IDs, we follow Unicode practice by omitting “most likely subtags” # as per Unicode CLDR. For example, English is simply “en” and not # “en-Latn” because according to Unicode, the default script # for English is Latin. 
# # http://www.unicode.org/cldr/charts/latest/supplemental/likely_subtags.html # http://www.iana.org/assignments/language-subtag-registry/language-subtag-registry _WINDOWS_LANGUAGES = { 0x0436: "af", 0x041C: "sq", 0x0484: "gsw", 0x045E: "am", 0x1401: "ar-DZ", 0x3C01: "ar-BH", 0x0C01: "ar", 0x0801: "ar-IQ", 0x2C01: "ar-JO", 0x3401: "ar-KW", 0x3001: "ar-LB", 0x1001: "ar-LY", 0x1801: "ary", 0x2001: "ar-OM", 0x4001: "ar-QA", 0x0401: "ar-SA", 0x2801: "ar-SY", 0x1C01: "aeb", 0x3801: "ar-AE", 0x2401: "ar-YE", 0x042B: "hy", 0x044D: "as", 0x082C: "az-Cyrl", 0x042C: "az", 0x046D: "ba", 0x042D: "eu", 0x0423: "be", 0x0845: "bn", 0x0445: "bn-IN", 0x201A: "bs-Cyrl", 0x141A: "bs", 0x047E: "br", 0x0402: "bg", 0x0403: "ca", 0x0C04: "zh-HK", 0x1404: "zh-MO", 0x0804: "zh", 0x1004: "zh-SG", 0x0404: "zh-TW", 0x0483: "co", 0x041A: "hr", 0x101A: "hr-BA", 0x0405: "cs", 0x0406: "da", 0x048C: "prs", 0x0465: "dv", 0x0813: "nl-BE", 0x0413: "nl", 0x0C09: "en-AU", 0x2809: "en-BZ", 0x1009: "en-CA", 0x2409: "en-029", 0x4009: "en-IN", 0x1809: "en-IE", 0x2009: "en-JM", 0x4409: "en-MY", 0x1409: "en-NZ", 0x3409: "en-PH", 0x4809: "en-SG", 0x1C09: "en-ZA", 0x2C09: "en-TT", 0x0809: "en-GB", 0x0409: "en", 0x3009: "en-ZW", 0x0425: "et", 0x0438: "fo", 0x0464: "fil", 0x040B: "fi", 0x080C: "fr-BE", 0x0C0C: "fr-CA", 0x040C: "fr", 0x140C: "fr-LU", 0x180C: "fr-MC", 0x100C: "fr-CH", 0x0462: "fy", 0x0456: "gl", 0x0437: "ka", 0x0C07: "de-AT", 0x0407: "de", 0x1407: "de-LI", 0x1007: "de-LU", 0x0807: "de-CH", 0x0408: "el", 0x046F: "kl", 0x0447: "gu", 0x0468: "ha", 0x040D: "he", 0x0439: "hi", 0x040E: "hu", 0x040F: "is", 0x0470: "ig", 0x0421: "id", 0x045D: "iu", 0x085D: "iu-Latn", 0x083C: "ga", 0x0434: "xh", 0x0435: "zu", 0x0410: "it", 0x0810: "it-CH", 0x0411: "ja", 0x044B: "kn", 0x043F: "kk", 0x0453: "km", 0x0486: "quc", 0x0487: "rw", 0x0441: "sw", 0x0457: "kok", 0x0412: "ko", 0x0440: "ky", 0x0454: "lo", 0x0426: "lv", 0x0427: "lt", 0x082E: "dsb", 0x046E: "lb", 0x042F: "mk", 0x083E: "ms-BN", 0x043E: "ms", 0x044C: "ml", 0x043A: "mt", 0x0481: "mi", 0x047A: "arn", 0x044E: "mr", 0x047C: "moh", 0x0450: "mn", 0x0850: "mn-CN", 0x0461: "ne", 0x0414: "nb", 0x0814: "nn", 0x0482: "oc", 0x0448: "or", 0x0463: "ps", 0x0415: "pl", 0x0416: "pt", 0x0816: "pt-PT", 0x0446: "pa", 0x046B: "qu-BO", 0x086B: "qu-EC", 0x0C6B: "qu", 0x0418: "ro", 0x0417: "rm", 0x0419: "ru", 0x243B: "smn", 0x103B: "smj-NO", 0x143B: "smj", 0x0C3B: "se-FI", 0x043B: "se", 0x083B: "se-SE", 0x203B: "sms", 0x183B: "sma-NO", 0x1C3B: "sms", 0x044F: "sa", 0x1C1A: "sr-Cyrl-BA", 0x0C1A: "sr", 0x181A: "sr-Latn-BA", 0x081A: "sr-Latn", 0x046C: "nso", 0x0432: "tn", 0x045B: "si", 0x041B: "sk", 0x0424: "sl", 0x2C0A: "es-AR", 0x400A: "es-BO", 0x340A: "es-CL", 0x240A: "es-CO", 0x140A: "es-CR", 0x1C0A: "es-DO", 0x300A: "es-EC", 0x440A: "es-SV", 0x100A: "es-GT", 0x480A: "es-HN", 0x080A: "es-MX", 0x4C0A: "es-NI", 0x180A: "es-PA", 0x3C0A: "es-PY", 0x280A: "es-PE", 0x500A: "es-PR", # Microsoft has defined two different language codes for # “Spanish with modern sorting” and “Spanish with traditional # sorting”. This makes sense for collation APIs, and it would be # possible to express this in BCP 47 language tags via Unicode # extensions (eg., “es-u-co-trad” is “Spanish with traditional # sorting”). However, for storing names in fonts, this distinction # does not make sense, so we use “es” in both cases. 
0x0C0A: "es", 0x040A: "es", 0x540A: "es-US", 0x380A: "es-UY", 0x200A: "es-VE", 0x081D: "sv-FI", 0x041D: "sv", 0x045A: "syr", 0x0428: "tg", 0x085F: "tzm", 0x0449: "ta", 0x0444: "tt", 0x044A: "te", 0x041E: "th", 0x0451: "bo", 0x041F: "tr", 0x0442: "tk", 0x0480: "ug", 0x0422: "uk", 0x042E: "hsb", 0x0420: "ur", 0x0843: "uz-Cyrl", 0x0443: "uz", 0x042A: "vi", 0x0452: "cy", 0x0488: "wo", 0x0485: "sah", 0x0478: "ii", 0x046A: "yo", } _MAC_LANGUAGES = { 0: "en", 1: "fr", 2: "de", 3: "it", 4: "nl", 5: "sv", 6: "es", 7: "da", 8: "pt", 9: "no", 10: "he", 11: "ja", 12: "ar", 13: "fi", 14: "el", 15: "is", 16: "mt", 17: "tr", 18: "hr", 19: "zh-Hant", 20: "ur", 21: "hi", 22: "th", 23: "ko", 24: "lt", 25: "pl", 26: "hu", 27: "es", 28: "lv", 29: "se", 30: "fo", 31: "fa", 32: "ru", 33: "zh", 34: "nl-BE", 35: "ga", 36: "sq", 37: "ro", 38: "cz", 39: "sk", 40: "sl", 41: "yi", 42: "sr", 43: "mk", 44: "bg", 45: "uk", 46: "be", 47: "uz", 48: "kk", 49: "az-Cyrl", 50: "az-Arab", 51: "hy", 52: "ka", 53: "mo", 54: "ky", 55: "tg", 56: "tk", 57: "mn-CN", 58: "mn", 59: "ps", 60: "ks", 61: "ku", 62: "sd", 63: "bo", 64: "ne", 65: "sa", 66: "mr", 67: "bn", 68: "as", 69: "gu", 70: "pa", 71: "or", 72: "ml", 73: "kn", 74: "ta", 75: "te", 76: "si", 77: "my", 78: "km", 79: "lo", 80: "vi", 81: "id", 82: "tl", 83: "ms", 84: "ms-Arab", 85: "am", 86: "ti", 87: "om", 88: "so", 89: "sw", 90: "rw", 91: "rn", 92: "ny", 93: "mg", 94: "eo", 128: "cy", 129: "eu", 130: "ca", 131: "la", 132: "qu", 133: "gn", 134: "ay", 135: "tt", 136: "ug", 137: "dz", 138: "jv", 139: "su", 140: "gl", 141: "af", 142: "br", 143: "iu", 144: "gd", 145: "gv", 146: "ga", 147: "to", 148: "el-polyton", 149: "kl", 150: "az", 151: "nn", } _WINDOWS_LANGUAGE_CODES = { lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items() } _MAC_LANGUAGE_CODES = {lang.lower(): code for code, lang in _MAC_LANGUAGES.items()} # MacOS language ID → MacOS script ID # # Note that the script ID is not sufficient to determine what encoding # to use in TrueType files. For some languages, MacOS used a modification # of a mainstream script. For example, an Icelandic name would be stored # with smRoman in the TrueType naming table, but the actual encoding # is a special Icelandic version of the normal Macintosh Roman encoding. # As another example, Inuktitut uses an 8-bit encoding for Canadian Aboriginal # Syllables but MacOS had run out of available script codes, so this was # done as a (pretty radical) “modification” of Ethiopic. 
# # http://unicode.org/Public/MAPPINGS/VENDORS/APPLE/Readme.txt _MAC_LANGUAGE_TO_SCRIPT = { 0: 0, # langEnglish → smRoman 1: 0, # langFrench → smRoman 2: 0, # langGerman → smRoman 3: 0, # langItalian → smRoman 4: 0, # langDutch → smRoman 5: 0, # langSwedish → smRoman 6: 0, # langSpanish → smRoman 7: 0, # langDanish → smRoman 8: 0, # langPortuguese → smRoman 9: 0, # langNorwegian → smRoman 10: 5, # langHebrew → smHebrew 11: 1, # langJapanese → smJapanese 12: 4, # langArabic → smArabic 13: 0, # langFinnish → smRoman 14: 6, # langGreek → smGreek 15: 0, # langIcelandic → smRoman (modified) 16: 0, # langMaltese → smRoman 17: 0, # langTurkish → smRoman (modified) 18: 0, # langCroatian → smRoman (modified) 19: 2, # langTradChinese → smTradChinese 20: 4, # langUrdu → smArabic 21: 9, # langHindi → smDevanagari 22: 21, # langThai → smThai 23: 3, # langKorean → smKorean 24: 29, # langLithuanian → smCentralEuroRoman 25: 29, # langPolish → smCentralEuroRoman 26: 29, # langHungarian → smCentralEuroRoman 27: 29, # langEstonian → smCentralEuroRoman 28: 29, # langLatvian → smCentralEuroRoman 29: 0, # langSami → smRoman 30: 0, # langFaroese → smRoman (modified) 31: 4, # langFarsi → smArabic (modified) 32: 7, # langRussian → smCyrillic 33: 25, # langSimpChinese → smSimpChinese 34: 0, # langFlemish → smRoman 35: 0, # langIrishGaelic → smRoman (modified) 36: 0, # langAlbanian → smRoman 37: 0, # langRomanian → smRoman (modified) 38: 29, # langCzech → smCentralEuroRoman 39: 29, # langSlovak → smCentralEuroRoman 40: 0, # langSlovenian → smRoman (modified) 41: 5, # langYiddish → smHebrew 42: 7, # langSerbian → smCyrillic 43: 7, # langMacedonian → smCyrillic 44: 7, # langBulgarian → smCyrillic 45: 7, # langUkrainian → smCyrillic (modified) 46: 7, # langByelorussian → smCyrillic 47: 7, # langUzbek → smCyrillic 48: 7, # langKazakh → smCyrillic 49: 7, # langAzerbaijani → smCyrillic 50: 4, # langAzerbaijanAr → smArabic 51: 24, # langArmenian → smArmenian 52: 23, # langGeorgian → smGeorgian 53: 7, # langMoldavian → smCyrillic 54: 7, # langKirghiz → smCyrillic 55: 7, # langTajiki → smCyrillic 56: 7, # langTurkmen → smCyrillic 57: 27, # langMongolian → smMongolian 58: 7, # langMongolianCyr → smCyrillic 59: 4, # langPashto → smArabic 60: 4, # langKurdish → smArabic 61: 4, # langKashmiri → smArabic 62: 4, # langSindhi → smArabic 63: 26, # langTibetan → smTibetan 64: 9, # langNepali → smDevanagari 65: 9, # langSanskrit → smDevanagari 66: 9, # langMarathi → smDevanagari 67: 13, # langBengali → smBengali 68: 13, # langAssamese → smBengali 69: 11, # langGujarati → smGujarati 70: 10, # langPunjabi → smGurmukhi 71: 12, # langOriya → smOriya 72: 17, # langMalayalam → smMalayalam 73: 16, # langKannada → smKannada 74: 14, # langTamil → smTamil 75: 15, # langTelugu → smTelugu 76: 18, # langSinhalese → smSinhalese 77: 19, # langBurmese → smBurmese 78: 20, # langKhmer → smKhmer 79: 22, # langLao → smLao 80: 30, # langVietnamese → smVietnamese 81: 0, # langIndonesian → smRoman 82: 0, # langTagalog → smRoman 83: 0, # langMalayRoman → smRoman 84: 4, # langMalayArabic → smArabic 85: 28, # langAmharic → smEthiopic 86: 28, # langTigrinya → smEthiopic 87: 28, # langOromo → smEthiopic 88: 0, # langSomali → smRoman 89: 0, # langSwahili → smRoman 90: 0, # langKinyarwanda → smRoman 91: 0, # langRundi → smRoman 92: 0, # langNyanja → smRoman 93: 0, # langMalagasy → smRoman 94: 0, # langEsperanto → smRoman 128: 0, # langWelsh → smRoman (modified) 129: 0, # langBasque → smRoman 130: 0, # langCatalan → smRoman 131: 0, # langLatin → smRoman 132: 0, # 
langQuechua → smRoman 133: 0, # langGuarani → smRoman 134: 0, # langAymara → smRoman 135: 7, # langTatar → smCyrillic 136: 4, # langUighur → smArabic 137: 26, # langDzongkha → smTibetan 138: 0, # langJavaneseRom → smRoman 139: 0, # langSundaneseRom → smRoman 140: 0, # langGalician → smRoman 141: 0, # langAfrikaans → smRoman 142: 0, # langBreton → smRoman (modified) 143: 28, # langInuktitut → smEthiopic (modified) 144: 0, # langScottishGaelic → smRoman (modified) 145: 0, # langManxGaelic → smRoman (modified) 146: 0, # langIrishGaelicScript → smRoman (modified) 147: 0, # langTongan → smRoman 148: 6, # langGreekAncient → smRoman 149: 0, # langGreenlandic → smRoman 150: 0, # langAzerbaijanRoman → smRoman 151: 0, # langNynorsk → smRoman } class NameRecordVisitor(TTVisitor): # Font tables that have NameIDs we need to collect. TABLES = ("GSUB", "GPOS", "fvar", "CPAL", "STAT") def __init__(self): self.seen = set() @NameRecordVisitor.register_attrs( ( (otTables.FeatureParamsSize, ("SubfamilyID", "SubfamilyNameID")), (otTables.FeatureParamsStylisticSet, ("UINameID",)), ( otTables.FeatureParamsCharacterVariants, ( "FeatUILabelNameID", "FeatUITooltipTextNameID", "SampleTextNameID", "FirstParamUILabelNameID", ), ), (otTables.STAT, ("ElidedFallbackNameID",)), (otTables.AxisRecord, ("AxisNameID",)), (otTables.AxisValue, ("ValueNameID",)), (otTables.FeatureName, ("FeatureNameID",)), (otTables.Setting, ("SettingNameID",)), ) ) def visit(visitor, obj, attr, value): visitor.seen.add(value) @NameRecordVisitor.register(ttLib.getTableClass("fvar")) def visit(visitor, obj): for inst in obj.instances: if inst.postscriptNameID != 0xFFFF: visitor.seen.add(inst.postscriptNameID) visitor.seen.add(inst.subfamilyNameID) for axis in obj.axes: visitor.seen.add(axis.axisNameID) @NameRecordVisitor.register(ttLib.getTableClass("CPAL")) def visit(visitor, obj): if obj.version == 1: visitor.seen.update(obj.paletteLabels) visitor.seen.update(obj.paletteEntryLabels) @NameRecordVisitor.register(ttLib.TTFont) def visit(visitor, font, *args, **kwargs): if hasattr(visitor, "font"): return False visitor.font = font for tag in visitor.TABLES: if tag in font: visitor.visit(font[tag], *args, **kwargs) del visitor.font return False PKaZZZ��d���"fontTools/ttLib/tables/_o_p_b_d.pyfrom .otBase import BaseTTXConverter # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html class table__o_p_b_d(BaseTTXConverter): pass PKaZZZ}a�T,,"fontTools/ttLib/tables/_p_o_s_t.pyfrom fontTools import ttLib from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder from fontTools.misc import sstruct from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex from . 
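# Usage sketch for NameRecordVisitor above (hedged; "MyVF.ttf" is a
# hypothetical path). removeUnusedNames() visits GSUB/GPOS/fvar/CPAL/STAT
# to collect referenced nameIDs, then drops name records with
# nameID >= 256 that nothing points at:
#
# >>> from fontTools.ttLib import TTFont
# >>> font = TTFont("MyVF.ttf")
# >>> deleted = font["name"].removeUnusedNames(font)  # set of removed IDs
# >>> font.save("MyVF-pruned.ttf")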
import DefaultTable import sys import struct import array import logging log = logging.getLogger(__name__) postFormat = """ > formatType: 16.16F italicAngle: 16.16F # italic angle in degrees underlinePosition: h underlineThickness: h isFixedPitch: L minMemType42: L # minimum memory if TrueType font is downloaded maxMemType42: L # maximum memory if TrueType font is downloaded minMemType1: L # minimum memory if Type1 font is downloaded maxMemType1: L # maximum memory if Type1 font is downloaded """ postFormatSize = sstruct.calcsize(postFormat) class table__p_o_s_t(DefaultTable.DefaultTable): def decompile(self, data, ttFont): sstruct.unpack(postFormat, data[:postFormatSize], self) data = data[postFormatSize:] if self.formatType == 1.0: self.decode_format_1_0(data, ttFont) elif self.formatType == 2.0: self.decode_format_2_0(data, ttFont) elif self.formatType == 3.0: self.decode_format_3_0(data, ttFont) elif self.formatType == 4.0: self.decode_format_4_0(data, ttFont) else: # supported format raise ttLib.TTLibError( "'post' table format %f not supported" % self.formatType ) def compile(self, ttFont): data = sstruct.pack(postFormat, self) if self.formatType == 1.0: pass # we're done elif self.formatType == 2.0: data = data + self.encode_format_2_0(ttFont) elif self.formatType == 3.0: pass # we're done elif self.formatType == 4.0: data = data + self.encode_format_4_0(ttFont) else: # supported format raise ttLib.TTLibError( "'post' table format %f not supported" % self.formatType ) return data def getGlyphOrder(self): """This function will get called by a ttLib.TTFont instance. Do not call this function yourself, use TTFont().getGlyphOrder() or its relatives instead! """ if not hasattr(self, "glyphOrder"): raise ttLib.TTLibError("illegal use of getGlyphOrder()") glyphOrder = self.glyphOrder del self.glyphOrder return glyphOrder def decode_format_1_0(self, data, ttFont): self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs] def decode_format_2_0(self, data, ttFont): (numGlyphs,) = struct.unpack(">H", data[:2]) numGlyphs = int(numGlyphs) if numGlyphs > ttFont["maxp"].numGlyphs: # Assume the numGlyphs field is bogus, so sync with maxp. # I've seen this in one font, and if the assumption is # wrong elsewhere, well, so be it: it's hard enough to # work around _one_ non-conforming post format... 
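            # (An added orientation note: the glyph-name indices read next use
            # 0-257 for the standard Macintosh glyph order and 258+k for
            # extraNames[k], the Pascal strings unpacked below.)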
numGlyphs = ttFont["maxp"].numGlyphs data = data[2:] indices = array.array("H") indices.frombytes(data[: 2 * numGlyphs]) if sys.byteorder != "big": indices.byteswap() data = data[2 * numGlyphs :] maxIndex = max(indices) self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257) self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs) for glyphID in range(numGlyphs): index = indices[glyphID] if index > 257: try: name = extraNames[index - 258] except IndexError: name = "" else: # fetch names from standard list name = standardGlyphOrder[index] glyphOrder[glyphID] = name self.build_psNameMapping(ttFont) def build_psNameMapping(self, ttFont): mapping = {} allNames = {} for i in range(ttFont["maxp"].numGlyphs): glyphName = psName = self.glyphOrder[i] if glyphName == "": glyphName = "glyph%.5d" % i if glyphName in allNames: # make up a new glyphName that's unique n = allNames[glyphName] while (glyphName + "#" + str(n)) in allNames: n += 1 allNames[glyphName] = n + 1 glyphName = glyphName + "#" + str(n) self.glyphOrder[i] = glyphName allNames[glyphName] = 1 if glyphName != psName: mapping[glyphName] = psName self.mapping = mapping def decode_format_3_0(self, data, ttFont): # Setting self.glyphOrder to None will cause the TTFont object # try and construct glyph names from a Unicode cmap table. self.glyphOrder = None def decode_format_4_0(self, data, ttFont): from fontTools import agl numGlyphs = ttFont["maxp"].numGlyphs indices = array.array("H") indices.frombytes(data) if sys.byteorder != "big": indices.byteswap() # In some older fonts, the size of the post table doesn't match # the number of glyphs. Sometimes it's bigger, sometimes smaller. self.glyphOrder = glyphOrder = [""] * int(numGlyphs) for i in range(min(len(indices), numGlyphs)): if indices[i] == 0xFFFF: self.glyphOrder[i] = "" elif indices[i] in agl.UV2AGL: self.glyphOrder[i] = agl.UV2AGL[indices[i]] else: self.glyphOrder[i] = "uni%04X" % indices[i] self.build_psNameMapping(ttFont) def encode_format_2_0(self, ttFont): numGlyphs = ttFont["maxp"].numGlyphs glyphOrder = ttFont.getGlyphOrder() assert len(glyphOrder) == numGlyphs indices = array.array("H") extraDict = {} extraNames = self.extraNames = [ n for n in self.extraNames if n not in standardGlyphOrder ] for i in range(len(extraNames)): extraDict[extraNames[i]] = i for glyphID in range(numGlyphs): glyphName = glyphOrder[glyphID] if glyphName in self.mapping: psName = self.mapping[glyphName] else: psName = glyphName if psName in extraDict: index = 258 + extraDict[psName] elif psName in standardGlyphOrder: index = standardGlyphOrder.index(psName) else: index = 258 + len(extraNames) extraDict[psName] = len(extraNames) extraNames.append(psName) indices.append(index) if sys.byteorder != "big": indices.byteswap() return ( struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames) ) def encode_format_4_0(self, ttFont): from fontTools import agl numGlyphs = ttFont["maxp"].numGlyphs glyphOrder = ttFont.getGlyphOrder() assert len(glyphOrder) == numGlyphs indices = array.array("H") for glyphID in glyphOrder: glyphID = glyphID.split("#")[0] if glyphID in agl.AGL2UV: indices.append(agl.AGL2UV[glyphID]) elif len(glyphID) == 7 and glyphID[:3] == "uni": indices.append(int(glyphID[3:], 16)) else: indices.append(0xFFFF) if sys.byteorder != "big": indices.byteswap() return indices.tobytes() def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(postFormat) for name in names: value = getattr(self, name) writer.simpletag(name, value=value) 
writer.newline() if hasattr(self, "mapping"): writer.begintag("psNames") writer.newline() writer.comment( "This file uses unique glyph names based on the information\n" "found in the 'post' table. Since these names might not be unique,\n" "we have to invent artificial names in case of clashes. In order to\n" "be able to retain the original information, we need a name to\n" "ps name mapping for those cases where they differ. That's what\n" "you see below.\n" ) writer.newline() items = sorted(self.mapping.items()) for name, psName in items: writer.simpletag("psName", name=name, psName=psName) writer.newline() writer.endtag("psNames") writer.newline() if hasattr(self, "extraNames"): writer.begintag("extraNames") writer.newline() writer.comment( "following are the name that are not taken from the standard Mac glyph order" ) writer.newline() for name in self.extraNames: writer.simpletag("psName", name=name) writer.newline() writer.endtag("extraNames") writer.newline() if hasattr(self, "data"): writer.begintag("hexdata") writer.newline() writer.dumphex(self.data) writer.endtag("hexdata") writer.newline() def fromXML(self, name, attrs, content, ttFont): if name not in ("psNames", "extraNames", "hexdata"): setattr(self, name, safeEval(attrs["value"])) elif name == "psNames": self.mapping = {} for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "psName": self.mapping[attrs["name"]] = attrs["psName"] elif name == "extraNames": self.extraNames = [] for element in content: if not isinstance(element, tuple): continue name, attrs, content = element if name == "psName": self.extraNames.append(attrs["name"]) else: self.data = readHex(content) def unpackPStrings(data, n): # extract n Pascal strings from data. # if there is not enough data, use "" strings = [] index = 0 dataLen = len(data) for _ in range(n): if dataLen <= index: length = 0 else: length = byteord(data[index]) index += 1 if dataLen <= index + length - 1: name = "" else: name = tostr(data[index : index + length], encoding="latin1") strings.append(name) index += length if index < dataLen: log.warning("%d extra bytes in post.stringData array", dataLen - index) elif dataLen < index: log.warning("not enough data in post.stringData array") return strings def packPStrings(strings): data = b"" for s in strings: data = data + bytechr(len(s)) + tobytes(s, encoding="latin1") return data PKaZZZ��ss"fontTools/ttLib/tables/_p_r_e_p.pyfrom fontTools import ttLib superclass = ttLib.getTableClass("fpgm") class table__p_r_e_p(superclass): pass PKaZZZP{հ��"fontTools/ttLib/tables/_p_r_o_p.pyfrom .otBase import BaseTTXConverter # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html class table__p_r_o_p(BaseTTXConverter): pass PKaZZZ���[["fontTools/ttLib/tables/_s_b_i_x.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import safeEval, num2binary, binary2num from . import DefaultTable from .sbixStrike import Strike sbixHeaderFormat = """ > version: H # Version number (set to 1) flags: H # The only two bits used in the flags field are bits 0 # and 1. For historical reasons, bit 0 must always be 1. 
# Bit 1 is a sbixDrawOutlines flag and is interpreted as # follows: # 0: Draw only 'sbix' bitmaps # 1: Draw both 'sbix' bitmaps and outlines, in that # order numStrikes: L # Number of bitmap strikes to follow """ sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat) sbixStrikeOffsetFormat = """ > strikeOffset: L # Offset from begining of table to data for the # individual strike """ sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat) class table__s_b_i_x(DefaultTable.DefaultTable): def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.version = 1 self.flags = 1 self.numStrikes = 0 self.strikes = {} self.strikeOffsets = [] def decompile(self, data, ttFont): # read table header sstruct.unpack(sbixHeaderFormat, data[:sbixHeaderFormatSize], self) # collect offsets to individual strikes in self.strikeOffsets for i in range(self.numStrikes): current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize offset_entry = sbixStrikeOffset() sstruct.unpack( sbixStrikeOffsetFormat, data[current_offset : current_offset + sbixStrikeOffsetFormatSize], offset_entry, ) self.strikeOffsets.append(offset_entry.strikeOffset) # decompile Strikes for i in range(self.numStrikes - 1, -1, -1): current_strike = Strike(rawdata=data[self.strikeOffsets[i] :]) data = data[: self.strikeOffsets[i]] current_strike.decompile(ttFont) # print " Strike length: %xh" % len(bitmapSetData) # print "Number of Glyph entries:", len(current_strike.glyphs) if current_strike.ppem in self.strikes: from fontTools import ttLib raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike") self.strikes[current_strike.ppem] = current_strike # after the glyph data records have been extracted, we don't need the offsets anymore del self.strikeOffsets del self.numStrikes def compile(self, ttFont): sbixData = b"" self.numStrikes = len(self.strikes) sbixHeader = sstruct.pack(sbixHeaderFormat, self) # calculate offset to start of first strike setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes for si in sorted(self.strikes.keys()): current_strike = self.strikes[si] current_strike.compile(ttFont) # append offset to this strike to table header current_strike.strikeOffset = setOffset sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike) setOffset += len(current_strike.data) sbixData += current_strike.data return sbixHeader + sbixData def toXML(self, xmlWriter, ttFont): xmlWriter.simpletag("version", value=self.version) xmlWriter.newline() xmlWriter.simpletag("flags", value=num2binary(self.flags, 16)) xmlWriter.newline() for i in sorted(self.strikes.keys()): self.strikes[i].toXML(xmlWriter, ttFont) def fromXML(self, name, attrs, content, ttFont): if name == "version": setattr(self, name, safeEval(attrs["value"])) elif name == "flags": setattr(self, name, binary2num(attrs["value"])) elif name == "strike": current_strike = Strike() for element in content: if isinstance(element, tuple): name, attrs, content = element current_strike.fromXML(name, attrs, content, ttFont) self.strikes[current_strike.ppem] = current_strike else: from fontTools import ttLib raise ttLib.TTLibError("can't handle '%s' element" % name) # Helper classes class sbixStrikeOffset(object): pass PKaZZZ +��=+=+"fontTools/ttLib/tables/_t_r_a_k.pyfrom fontTools.misc import sstruct from fontTools.misc.fixedTools import ( fixedToFloat as fi2fl, floatToFixed as fl2fi, floatToFixedToStr as fl2str, strToFixedToFloat as str2fl, ) from fontTools.misc.textTools import bytesjoin, safeEval from 
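# Usage sketch for the 'sbix' table above (hedged; "MyColorFont.ttf" is a
# hypothetical path, and the output shown is illustrative). Strikes are
# keyed by ppem, one per embedded bitmap size:
#
# >>> from fontTools.ttLib import TTFont
# >>> font = TTFont("MyColorFont.ttf")
# >>> sorted(font["sbix"].strikes.keys())   # available strike ppem sizes
# [32, 64, 128]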
fontTools.ttLib import TTLibError from . import DefaultTable import struct from collections.abc import MutableMapping # Apple's documentation of 'trak': # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html TRAK_HEADER_FORMAT = """ > # big endian version: 16.16F format: H horizOffset: H vertOffset: H reserved: H """ TRAK_HEADER_FORMAT_SIZE = sstruct.calcsize(TRAK_HEADER_FORMAT) TRACK_DATA_FORMAT = """ > # big endian nTracks: H nSizes: H sizeTableOffset: L """ TRACK_DATA_FORMAT_SIZE = sstruct.calcsize(TRACK_DATA_FORMAT) TRACK_TABLE_ENTRY_FORMAT = """ > # big endian track: 16.16F nameIndex: H offset: H """ TRACK_TABLE_ENTRY_FORMAT_SIZE = sstruct.calcsize(TRACK_TABLE_ENTRY_FORMAT) # size values are actually '16.16F' fixed-point values, but here I do the # fixedToFloat conversion manually instead of relying on sstruct SIZE_VALUE_FORMAT = ">l" SIZE_VALUE_FORMAT_SIZE = struct.calcsize(SIZE_VALUE_FORMAT) # per-Size values are in 'FUnits', i.e. 16-bit signed integers PER_SIZE_VALUE_FORMAT = ">h" PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT) class table__t_r_a_k(DefaultTable.DefaultTable): dependencies = ["name"] def compile(self, ttFont): dataList = [] offset = TRAK_HEADER_FORMAT_SIZE for direction in ("horiz", "vert"): trackData = getattr(self, direction + "Data", TrackData()) offsetName = direction + "Offset" # set offset to 0 if None or empty if not trackData: setattr(self, offsetName, 0) continue # TrackData table format must be longword aligned alignedOffset = (offset + 3) & ~3 padding, offset = b"\x00" * (alignedOffset - offset), alignedOffset setattr(self, offsetName, offset) data = trackData.compile(offset) offset += len(data) dataList.append(padding + data) self.reserved = 0 tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList) return tableData def decompile(self, data, ttFont): sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self) for direction in ("horiz", "vert"): trackData = TrackData() offset = getattr(self, direction + "Offset") if offset != 0: trackData.decompile(data, offset) setattr(self, direction + "Data", trackData) def toXML(self, writer, ttFont): writer.simpletag("version", value=self.version) writer.newline() writer.simpletag("format", value=self.format) writer.newline() for direction in ("horiz", "vert"): dataName = direction + "Data" writer.begintag(dataName) writer.newline() trackData = getattr(self, dataName, TrackData()) trackData.toXML(writer, ttFont) writer.endtag(dataName) writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "version": self.version = safeEval(attrs["value"]) elif name == "format": self.format = safeEval(attrs["value"]) elif name in ("horizData", "vertData"): trackData = TrackData() setattr(self, name, trackData) for element in content: if not isinstance(element, tuple): continue name, attrs, content_ = element trackData.fromXML(name, attrs, content_, ttFont) class TrackData(MutableMapping): def __init__(self, initialdata={}): self._map = dict(initialdata) def compile(self, offset): nTracks = len(self) sizes = self.sizes() nSizes = len(sizes) # offset to the start of the size subtable offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE * nTracks trackDataHeader = sstruct.pack( TRACK_DATA_FORMAT, {"nTracks": nTracks, "nSizes": nSizes, "sizeTableOffset": offset}, ) entryDataList = [] perSizeDataList = [] # offset to per-size tracking values offset += SIZE_VALUE_FORMAT_SIZE * nSizes # sort track table entries by track value for 
track, entry in sorted(self.items()): assert entry.nameIndex is not None entry.track = track entry.offset = offset entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)] # sort per-size values by size for size, value in sorted(entry.items()): perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)] offset += PER_SIZE_VALUE_FORMAT_SIZE * nSizes # sort size values sizeDataList = [ struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes) ] data = bytesjoin( [trackDataHeader] + entryDataList + sizeDataList + perSizeDataList ) return data def decompile(self, data, offset): # initial offset is from the start of trak table to the current TrackData trackDataHeader = data[offset : offset + TRACK_DATA_FORMAT_SIZE] if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE: raise TTLibError("not enough data to decompile TrackData header") sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self) offset += TRACK_DATA_FORMAT_SIZE nSizes = self.nSizes sizeTableOffset = self.sizeTableOffset sizeTable = [] for i in range(nSizes): sizeValueData = data[ sizeTableOffset : sizeTableOffset + SIZE_VALUE_FORMAT_SIZE ] if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE: raise TTLibError("not enough data to decompile TrackData size subtable") (sizeValue,) = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData) sizeTable.append(fi2fl(sizeValue, 16)) sizeTableOffset += SIZE_VALUE_FORMAT_SIZE for i in range(self.nTracks): entry = TrackTableEntry() entryData = data[offset : offset + TRACK_TABLE_ENTRY_FORMAT_SIZE] if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE: raise TTLibError("not enough data to decompile TrackTableEntry record") sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry) perSizeOffset = entry.offset for j in range(nSizes): size = sizeTable[j] perSizeValueData = data[ perSizeOffset : perSizeOffset + PER_SIZE_VALUE_FORMAT_SIZE ] if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE: raise TTLibError( "not enough data to decompile per-size track values" ) (perSizeValue,) = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData) entry[size] = perSizeValue perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE self[entry.track] = entry offset += TRACK_TABLE_ENTRY_FORMAT_SIZE def toXML(self, writer, ttFont): nTracks = len(self) nSizes = len(self.sizes()) writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes)) writer.newline() for track, entry in sorted(self.items()): assert entry.nameIndex is not None entry.track = track entry.toXML(writer, ttFont) def fromXML(self, name, attrs, content, ttFont): if name != "trackEntry": return entry = TrackTableEntry() entry.fromXML(name, attrs, content, ttFont) self[entry.track] = entry def sizes(self): if not self: return frozenset() tracks = list(self.tracks()) sizes = self[tracks.pop(0)].sizes() for track in tracks: entrySizes = self[track].sizes() if sizes != entrySizes: raise TTLibError( "'trak' table entries must specify the same sizes: " "%s != %s" % (sorted(sizes), sorted(entrySizes)) ) return frozenset(sizes) def __getitem__(self, track): return self._map[track] def __delitem__(self, track): del self._map[track] def __setitem__(self, track, entry): self._map[track] = entry def __len__(self): return len(self._map) def __iter__(self): return iter(self._map) def keys(self): return self._map.keys() tracks = keys def __repr__(self): return "TrackData({})".format(self._map if self else "") class TrackTableEntry(MutableMapping): def __init__(self, values={}, nameIndex=None): self.nameIndex = nameIndex self._map = dict(values) def toXML(self, writer, ttFont): name = 
ttFont["name"].getDebugName(self.nameIndex) writer.begintag( "trackEntry", (("value", fl2str(self.track, 16)), ("nameIndex", self.nameIndex)), ) writer.newline() if name: writer.comment(name) writer.newline() for size, perSizeValue in sorted(self.items()): writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue) writer.newline() writer.endtag("trackEntry") writer.newline() def fromXML(self, name, attrs, content, ttFont): self.track = str2fl(attrs["value"], 16) self.nameIndex = safeEval(attrs["nameIndex"]) for element in content: if not isinstance(element, tuple): continue name, attrs, _ = element if name != "track": continue size = str2fl(attrs["size"], 16) self[size] = safeEval(attrs["value"]) def __getitem__(self, size): return self._map[size] def __delitem__(self, size): del self._map[size] def __setitem__(self, size, value): self._map[size] = value def __len__(self): return len(self._map) def __iter__(self): return iter(self._map) def keys(self): return self._map.keys() sizes = keys def __repr__(self): return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.nameIndex == other.nameIndex and dict(self) == dict(other) def __ne__(self, other): result = self.__eq__(other) return result if result is NotImplemented else not result PKaZZZ�ޘ�"""fontTools/ttLib/tables/_v_h_e_a.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from fontTools.misc.fixedTools import ( ensureVersionIsLong as fi2ve, versionToFixed as ve2fi, ) from . import DefaultTable import math vheaFormat = """ > # big endian tableVersion: L ascent: h descent: h lineGap: h advanceHeightMax: H minTopSideBearing: h minBottomSideBearing: h yMaxExtent: h caretSlopeRise: h caretSlopeRun: h caretOffset: h reserved1: h reserved2: h reserved3: h reserved4: h metricDataFormat: h numberOfVMetrics: H """ class table__v_h_e_a(DefaultTable.DefaultTable): # Note: Keep in sync with table__h_h_e_a dependencies = ["vmtx", "glyf", "CFF ", "CFF2"] def decompile(self, data, ttFont): sstruct.unpack(vheaFormat, data, self) def compile(self, ttFont): if ttFont.recalcBBoxes and ( ttFont.isLoaded("glyf") or ttFont.isLoaded("CFF ") or ttFont.isLoaded("CFF2") ): self.recalc(ttFont) self.tableVersion = fi2ve(self.tableVersion) return sstruct.pack(vheaFormat, self) def recalc(self, ttFont): if "vmtx" not in ttFont: return vmtxTable = ttFont["vmtx"] self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values()) boundsHeightDict = {} if "glyf" in ttFont: glyfTable = ttFont["glyf"] for name in ttFont.getGlyphOrder(): g = glyfTable[name] if g.numberOfContours == 0: continue if g.numberOfContours < 0 and not hasattr(g, "yMax"): # Composite glyph without extents set. # Calculate those. 
g.recalcBounds(glyfTable) boundsHeightDict[name] = g.yMax - g.yMin elif "CFF " in ttFont or "CFF2" in ttFont: if "CFF " in ttFont: topDict = ttFont["CFF "].cff.topDictIndex[0] else: topDict = ttFont["CFF2"].cff.topDictIndex[0] charStrings = topDict.CharStrings for name in ttFont.getGlyphOrder(): cs = charStrings[name] bounds = cs.calcBounds(charStrings) if bounds is not None: boundsHeightDict[name] = int( math.ceil(bounds[3]) - math.floor(bounds[1]) ) if boundsHeightDict: minTopSideBearing = float("inf") minBottomSideBearing = float("inf") yMaxExtent = -float("inf") for name, boundsHeight in boundsHeightDict.items(): advanceHeight, tsb = vmtxTable[name] bsb = advanceHeight - tsb - boundsHeight extent = tsb + boundsHeight minTopSideBearing = min(minTopSideBearing, tsb) minBottomSideBearing = min(minBottomSideBearing, bsb) yMaxExtent = max(yMaxExtent, extent) self.minTopSideBearing = minTopSideBearing self.minBottomSideBearing = minBottomSideBearing self.yMaxExtent = yMaxExtent else: # No glyph has outlines. self.minTopSideBearing = 0 self.minBottomSideBearing = 0 self.yMaxExtent = 0 def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(vheaFormat) for name in names: value = getattr(self, name) if name == "tableVersion": value = fi2ve(value) value = "0x%08x" % value writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "tableVersion": setattr(self, name, ve2fi(attrs["value"])) return setattr(self, name, safeEval(attrs["value"])) # reserved0 is caretOffset for legacy reasons @property def reserved0(self): return self.caretOffset @reserved0.setter def reserved0(self, value): self.caretOffset = value PKaZZZ���,��"fontTools/ttLib/tables/_v_m_t_x.pyfrom fontTools import ttLib superclass = ttLib.getTableClass("hmtx") class table__v_m_t_x(superclass): headerTag = "vhea" advanceName = "height" sideBearingName = "tsb" numberOfMetricsName = "numberOfVMetrics" PKaZZZP?�}}$fontTools/ttLib/tables/asciiTable.pyfrom fontTools.misc.textTools import strjoin, tobytes, tostr from . import DefaultTable class asciiTable(DefaultTable.DefaultTable): def toXML(self, writer, ttFont): data = tostr(self.data) # removing null bytes. XXX needed?? 
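# The split/strjoin pair below just deletes NUL bytes; e.g.
# "a\0b\0c".split("\0") -> ["a", "b", "c"], and strjoin() of that
# list is "abc". (A str.replace("\0", "") would be equivalent.)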
data = data.split("\0") data = strjoin(data) writer.begintag("source") writer.newline() writer.write_noindent(data) writer.newline() writer.endtag("source") writer.newline() def fromXML(self, name, attrs, content, ttFont): lines = strjoin(content).split("\n") self.data = tobytes("\n".join(lines[1:-1])) PKaZZZ�Iz���!fontTools/ttLib/tables/grUtils.pyimport struct, warnings try: import lz4 except ImportError: lz4 = None else: import lz4.block # old scheme for VERSION < 0.9 otherwise use lz4.block def decompress(data): (compression,) = struct.unpack(">L", data[4:8]) scheme = compression >> 27 size = compression & 0x07FFFFFF if scheme == 0: pass elif scheme == 1 and lz4: res = lz4.block.decompress(struct.pack("<L", size) + data[8:]) if len(res) != size: warnings.warn("Table decompression failed.") else: data = res else: warnings.warn("Table is compressed with an unsupported compression scheme") return (data, scheme) def compress(scheme, data): hdr = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07FFFFFF)) if scheme == 0: return data elif scheme == 1 and lz4: res = lz4.block.compress( data, mode="high_compression", compression=16, store_size=False ) return hdr + res else: warnings.warn("Table failed to compress by unsupported compression scheme") return data def _entries(attrs, sameval): ak = 0 vals = [] lastv = 0 for k, v in attrs: if len(vals) and (k != ak + 1 or (sameval and v != lastv)): yield (ak - len(vals) + 1, len(vals), vals) vals = [] ak = k vals.append(v) lastv = v yield (ak - len(vals) + 1, len(vals), vals) def entries(attributes, sameval=False): g = _entries(sorted(attributes.items(), key=lambda x: int(x[0])), sameval) return g def bininfo(num, size=1): if num == 0: return struct.pack(">4H", 0, 0, 0, 0) srange = 1 select = 0 while srange <= num: srange *= 2 select += 1 select -= 1 srange //= 2 srange *= size shift = num * size - srange return struct.pack(">4H", num, srange, select, shift) def num2tag(n): if n < 0x200000: return str(n) else: return ( struct.unpack("4s", struct.pack(">L", n))[0].replace(b"\000", b"").decode() ) def tag2num(n): try: return int(n) except ValueError: n = (n + " ")[:4] return struct.unpack(">L", n.encode("ascii"))[0] PKaZZZ4��D���� fontTools/ttLib/tables/otBase.pyfrom fontTools.config import OPTIONS from fontTools.misc.textTools import Tag, bytesjoin from .DefaultTable import DefaultTable from enum import IntEnum import sys import array import struct import logging from functools import lru_cache from typing import Iterator, NamedTuple, Optional, Tuple log = logging.getLogger(__name__) have_uharfbuzz = False try: import uharfbuzz as hb # repack method added in uharfbuzz >= 0.23; if uharfbuzz *can* be # imported but repack method is missing, behave as if uharfbuzz # is not available (fallback to the slower Python implementation) have_uharfbuzz = callable(getattr(hb, "repack", None)) except ImportError: pass USE_HARFBUZZ_REPACKER = OPTIONS[f"{__name__}:USE_HARFBUZZ_REPACKER"] class OverflowErrorRecord(object): def __init__(self, overflowTuple): self.tableType = overflowTuple[0] self.LookupListIndex = overflowTuple[1] self.SubTableIndex = overflowTuple[2] self.itemName = overflowTuple[3] self.itemIndex = overflowTuple[4] def __repr__(self): return str( ( self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex, ) ) class OTLOffsetOverflowError(Exception): def __init__(self, overflowErrorRecord): self.value = overflowErrorRecord def __str__(self): return 
repr(self.value) class RepackerState(IntEnum): # Repacking control flow is implemnted using a state machine. The state machine table: # # State | Packing Success | Packing Failed | Exception Raised | # ------------+-----------------+----------------+------------------+ # PURE_FT | Return result | PURE_FT | Return failure | # HB_FT | Return result | HB_FT | FT_FALLBACK | # FT_FALLBACK | HB_FT | FT_FALLBACK | Return failure | # Pack only with fontTools, don't allow sharing between extensions. PURE_FT = 1 # Attempt to pack with harfbuzz (allowing sharing between extensions) # use fontTools to attempt overflow resolution. HB_FT = 2 # Fallback if HB/FT packing gets stuck. Pack only with fontTools, don't allow sharing between # extensions. FT_FALLBACK = 3 class BaseTTXConverter(DefaultTable): """Generic base class for TTX table converters. It functions as an adapter between the TTX (ttLib actually) table model and the model we use for OpenType tables, which is necessarily subtly different. """ def decompile(self, data, font): """Create an object from the binary data. Called automatically on access.""" from . import otTables reader = OTTableReader(data, tableTag=self.tableTag) tableClass = getattr(otTables, self.tableTag) self.table = tableClass() self.table.decompile(reader, font) def compile(self, font): """Compiles the table into binary. Called automatically on save.""" # General outline: # Create a top-level OTTableWriter for the GPOS/GSUB table. # Call the compile method for the the table # for each 'converter' record in the table converter list # call converter's write method for each item in the value. # - For simple items, the write method adds a string to the # writer's self.items list. # - For Struct/Table/Subtable items, it add first adds new writer to the # to the writer's self.items, then calls the item's compile method. # This creates a tree of writers, rooted at the GUSB/GPOS writer, with # each writer representing a table, and the writer.items list containing # the child data strings and writers. # call the getAllData method # call _doneWriting, which removes duplicates # call _gatherTables. This traverses the tables, adding unique occurences to a flat list of tables # Traverse the flat list of tables, calling getDataLength on each to update their position # Traverse the flat list of tables again, calling getData each get the data in the table, now that # pos's and offset are known. # If a lookup subtable overflows an offset, we have to start all over. overflowRecord = None # this is 3-state option: default (None) means automatically use hb.repack or # silently fall back if it fails; True, use it and raise error if not possible # or it errors out; False, don't use it, even if you can. 
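# For example, a caller could force the pure-python serializer like
# this (a sketch; the option is registered in fontTools.config under
# "fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER"):
#
#     from fontTools.ttLib import TTFont
#     font = TTFont("In.ttf")
#     font.cfg[USE_HARFBUZZ_REPACKER] = False
#     font.save("Out.ttf")  # GSUB/GPOS packed without hb.repack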
use_hb_repack = font.cfg[USE_HARFBUZZ_REPACKER] if self.tableTag in ("GSUB", "GPOS"): if use_hb_repack is False: log.debug( "hb.repack disabled, compiling '%s' with pure-python serializer", self.tableTag, ) elif not have_uharfbuzz: if use_hb_repack is True: raise ImportError("No module named 'uharfbuzz'") else: assert use_hb_repack is None log.debug( "uharfbuzz not found, compiling '%s' with pure-python serializer", self.tableTag, ) if ( use_hb_repack in (None, True) and have_uharfbuzz and self.tableTag in ("GSUB", "GPOS") ): state = RepackerState.HB_FT else: state = RepackerState.PURE_FT hb_first_error_logged = False lastOverflowRecord = None while True: try: writer = OTTableWriter(tableTag=self.tableTag) self.table.compile(writer, font) if state == RepackerState.HB_FT: return self.tryPackingHarfbuzz(writer, hb_first_error_logged) elif state == RepackerState.PURE_FT: return self.tryPackingFontTools(writer) elif state == RepackerState.FT_FALLBACK: # Run packing with FontTools only, but don't return the result as it will # not be optimally packed. Once a successful packing has been found, state is # changed back to harfbuzz packing to produce the final, optimal, packing. self.tryPackingFontTools(writer) log.debug( "Re-enabling sharing between extensions and switching back to " "harfbuzz+fontTools packing." ) state = RepackerState.HB_FT except OTLOffsetOverflowError as e: hb_first_error_logged = True ok = self.tryResolveOverflow(font, e, lastOverflowRecord) lastOverflowRecord = e.value if ok: continue if state is RepackerState.HB_FT: log.debug( "Harfbuzz packing out of resolutions, disabling sharing between extensions and " "switching to fontTools only packing." ) state = RepackerState.FT_FALLBACK else: raise def tryPackingHarfbuzz(self, writer, hb_first_error_logged): try: log.debug("serializing '%s' with hb.repack", self.tableTag) return writer.getAllDataUsingHarfbuzz(self.tableTag) except (ValueError, MemoryError, hb.RepackerError) as e: # Only log hb repacker errors the first time they occur in # the offset-overflow resolution loop, they are just noisy. # Maybe we can revisit this if/when uharfbuzz actually gives # us more info as to why hb.repack failed... if not hb_first_error_logged: error_msg = f"{type(e).__name__}" if str(e) != "": error_msg += f": {e}" log.warning( "hb.repack failed to serialize '%s', attempting fonttools resolutions " "; the error message was: %s", self.tableTag, error_msg, ) hb_first_error_logged = True return writer.getAllData(remove_duplicate=False) def tryPackingFontTools(self, writer): return writer.getAllData() def tryResolveOverflow(self, font, e, lastOverflowRecord): ok = 0 if lastOverflowRecord == e.value: # Oh well... return ok overflowRecord = e.value log.info("Attempting to fix OTLOffsetOverflowError %s", e) if overflowRecord.itemName is None: from .otTables import fixLookupOverFlows ok = fixLookupOverFlows(font, overflowRecord) else: from .otTables import fixSubTableOverFlows ok = fixSubTableOverFlows(font, overflowRecord) if ok: return ok # Try upgrading lookup to Extension and hope # that cross-lookup sharing not happening would # fix overflow... from .otTables import fixLookupOverFlows return fixLookupOverFlows(font, overflowRecord) def toXML(self, writer, font): self.table.toXML2(writer, font) def fromXML(self, name, attrs, content, font): from . 
import otTables if not hasattr(self, "table"): tableClass = getattr(otTables, self.tableTag) self.table = tableClass() self.table.fromXML(name, attrs, content, font) self.table.populateDefaults() def ensureDecompiled(self, recurse=True): self.table.ensureDecompiled(recurse=recurse) # https://github.com/fonttools/fonttools/pull/2285#issuecomment-834652928 assert len(struct.pack("i", 0)) == 4 assert array.array("i").itemsize == 4, "Oops, file a bug against fonttools." class OTTableReader(object): """Helper class to retrieve data from an OpenType table.""" __slots__ = ("data", "offset", "pos", "localState", "tableTag") def __init__(self, data, localState=None, offset=0, tableTag=None): self.data = data self.offset = offset self.pos = offset self.localState = localState self.tableTag = tableTag def advance(self, count): self.pos += count def seek(self, pos): self.pos = pos def copy(self): other = self.__class__(self.data, self.localState, self.offset, self.tableTag) other.pos = self.pos return other def getSubReader(self, offset): offset = self.offset + offset return self.__class__(self.data, self.localState, offset, self.tableTag) def readValue(self, typecode, staticSize): pos = self.pos newpos = pos + staticSize (value,) = struct.unpack(f">{typecode}", self.data[pos:newpos]) self.pos = newpos return value def readArray(self, typecode, staticSize, count): pos = self.pos newpos = pos + count * staticSize value = array.array(typecode, self.data[pos:newpos]) if sys.byteorder != "big": value.byteswap() self.pos = newpos return value.tolist() def readInt8(self): return self.readValue("b", staticSize=1) def readInt8Array(self, count): return self.readArray("b", staticSize=1, count=count) def readShort(self): return self.readValue("h", staticSize=2) def readShortArray(self, count): return self.readArray("h", staticSize=2, count=count) def readLong(self): return self.readValue("i", staticSize=4) def readLongArray(self, count): return self.readArray("i", staticSize=4, count=count) def readUInt8(self): return self.readValue("B", staticSize=1) def readUInt8Array(self, count): return self.readArray("B", staticSize=1, count=count) def readUShort(self): return self.readValue("H", staticSize=2) def readUShortArray(self, count): return self.readArray("H", staticSize=2, count=count) def readULong(self): return self.readValue("I", staticSize=4) def readULongArray(self, count): return self.readArray("I", staticSize=4, count=count) def readUInt24(self): pos = self.pos newpos = pos + 3 (value,) = struct.unpack(">l", b"\0" + self.data[pos:newpos]) self.pos = newpos return value def readUInt24Array(self, count): return [self.readUInt24() for _ in range(count)] def readTag(self): pos = self.pos newpos = pos + 4 value = Tag(self.data[pos:newpos]) assert len(value) == 4, value self.pos = newpos return value def readData(self, count): pos = self.pos newpos = pos + count value = self.data[pos:newpos] self.pos = newpos return value def __setitem__(self, name, value): state = self.localState.copy() if self.localState else dict() state[name] = value self.localState = state def __getitem__(self, name): return self.localState and self.localState[name] def __contains__(self, name): return self.localState and name in self.localState class OffsetToWriter(object): def __init__(self, subWriter, offsetSize): self.subWriter = subWriter self.offsetSize = offsetSize def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.subWriter == other.subWriter and self.offsetSize == other.offsetSize def 
__hash__(self): # only works after self._doneWriting() has been called return hash((self.subWriter, self.offsetSize)) class OTTableWriter(object): """Helper class to gather and assemble data for OpenType tables.""" def __init__(self, localState=None, tableTag=None): self.items = [] self.pos = None self.localState = localState self.tableTag = tableTag self.parent = None def __setitem__(self, name, value): state = self.localState.copy() if self.localState else dict() state[name] = value self.localState = state def __getitem__(self, name): return self.localState[name] def __delitem__(self, name): del self.localState[name] # assembler interface def getDataLength(self): """Return the length of this table in bytes, without subtables.""" l = 0 for item in self.items: if hasattr(item, "getCountData"): l += item.size elif hasattr(item, "subWriter"): l += item.offsetSize else: l = l + len(item) return l def getData(self): """Assemble the data for this writer/table, without subtables.""" items = list(self.items) # make a shallow copy pos = self.pos numItems = len(items) for i in range(numItems): item = items[i] if hasattr(item, "subWriter"): if item.offsetSize == 4: items[i] = packULong(item.subWriter.pos - pos) elif item.offsetSize == 2: try: items[i] = packUShort(item.subWriter.pos - pos) except struct.error: # provide data to fix overflow problem. overflowErrorRecord = self.getOverflowErrorRecord( item.subWriter ) raise OTLOffsetOverflowError(overflowErrorRecord) elif item.offsetSize == 3: items[i] = packUInt24(item.subWriter.pos - pos) else: raise ValueError(item.offsetSize) return bytesjoin(items) def getDataForHarfbuzz(self): """Assemble the data for this writer/table with all offset field set to 0""" items = list(self.items) packFuncs = {2: packUShort, 3: packUInt24, 4: packULong} for i, item in enumerate(items): if hasattr(item, "subWriter"): # Offset value is not needed in harfbuzz repacker, so setting offset to 0 to avoid overflow here if item.offsetSize in packFuncs: items[i] = packFuncs[item.offsetSize](0) else: raise ValueError(item.offsetSize) return bytesjoin(items) def __hash__(self): # only works after self._doneWriting() has been called return hash(self.items) def __ne__(self, other): result = self.__eq__(other) return result if result is NotImplemented else not result def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.items == other.items def _doneWriting(self, internedTables, shareExtension=False): # Convert CountData references to data string items # collapse duplicate table references to a unique entry # "tables" are OTTableWriter objects. # For Extension Lookup types, we can # eliminate duplicates only within the tree under the Extension Lookup, # as offsets may exceed 64K even between Extension LookupTable subtables. isExtension = hasattr(self, "Extension") # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly # empty, array. So, we don't share those. # See: https://github.com/fonttools/fonttools/issues/518 dontShare = hasattr(self, "DontShare") if isExtension and not shareExtension: internedTables = {} items = self.items for i in range(len(items)): item = items[i] if hasattr(item, "getCountData"): items[i] = item.getCountData() elif hasattr(item, "subWriter"): item.subWriter._doneWriting( internedTables, shareExtension=shareExtension ) # At this point, all subwriters are hashable based on their items. 
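# (Illustrative sketch of the dict.setdefault interning used just
#  below: given two equal-but-distinct hashable objects a and b,
#      d = {}
#      d.setdefault(a, a) is a   # True: first occurrence is stored
#      d.setdefault(b, b) is a   # True: the duplicate collapses to a
#  which is how equal subwriter subtrees end up shared.)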
# (See hash and comparison magic methods above.) So the ``setdefault`` # call here will return the first writer object we've seen with # equal content, or store it in the dictionary if it's not been # seen yet. We therefore replace the subwriter object with an equivalent # object, which deduplicates the tree. if not dontShare: items[i].subWriter = internedTables.setdefault( item.subWriter, item.subWriter ) self.items = tuple(items) def _gatherTables(self, tables, extTables, done): # Convert table references in self.items tree to a flat # list of tables in depth-first traversal order. # "tables" are OTTableWriter objects. # We do the traversal in reverse order at each level, in order to # resolve duplicate references to be the last reference in the list of tables. # For extension lookups, duplicate references can be merged only within the # writer tree under the extension lookup. done[id(self)] = True numItems = len(self.items) iRange = list(range(numItems)) iRange.reverse() isExtension = hasattr(self, "Extension") selfTables = tables if isExtension: assert ( extTables is not None ), "Program or XML editing error. Extension subtables cannot contain extensions subtables" tables, extTables, done = extTables, None, {} # add Coverage table if it is sorted last. sortCoverageLast = False if hasattr(self, "sortCoverageLast"): # Find coverage table for i in range(numItems): item = self.items[i] if ( hasattr(item, "subWriter") and getattr(item.subWriter, "name", None) == "Coverage" ): sortCoverageLast = True break if id(item.subWriter) not in done: item.subWriter._gatherTables(tables, extTables, done) else: # We're a new parent of item pass for i in iRange: item = self.items[i] if not hasattr(item, "subWriter"): continue if ( sortCoverageLast and (i == 1) and getattr(item.subWriter, "name", None) == "Coverage" ): # we've already 'gathered' it above continue if id(item.subWriter) not in done: item.subWriter._gatherTables(tables, extTables, done) else: # Item is already written out by other parent pass selfTables.append(self) def _gatherGraphForHarfbuzz(self, tables, obj_list, done, objidx, virtual_edges): real_links = [] virtual_links = [] item_idx = objidx # Merge virtual_links from parent for idx in virtual_edges: virtual_links.append((0, 0, idx)) sortCoverageLast = False coverage_idx = 0 if hasattr(self, "sortCoverageLast"): # Find coverage table for i, item in enumerate(self.items): if getattr(item, "name", None) == "Coverage": sortCoverageLast = True if id(item) not in done: coverage_idx = item_idx = item._gatherGraphForHarfbuzz( tables, obj_list, done, item_idx, virtual_edges ) else: coverage_idx = done[id(item)] virtual_edges.append(coverage_idx) break child_idx = 0 offset_pos = 0 for i, item in enumerate(self.items): if hasattr(item, "subWriter"): pos = offset_pos elif hasattr(item, "getCountData"): offset_pos += item.size continue else: offset_pos = offset_pos + len(item) continue if id(item.subWriter) not in done: child_idx = item_idx = item.subWriter._gatherGraphForHarfbuzz( tables, obj_list, done, item_idx, virtual_edges ) else: child_idx = done[id(item.subWriter)] real_edge = (pos, item.offsetSize, child_idx) real_links.append(real_edge) offset_pos += item.offsetSize tables.append(self) obj_list.append((real_links, virtual_links)) item_idx += 1 done[id(self)] = item_idx if sortCoverageLast: virtual_edges.pop() return item_idx def getAllDataUsingHarfbuzz(self, tableTag): """The Whole table is represented as a Graph. Assemble graph data and call Harfbuzz repacker to pack the table. 
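        In rough terms (a sketch of the expected input, not an
        authoritative spec): each table contributes one entry to
        obj_list of the form (real_links, virtual_links), where a
        real link is a (position, offsetSize, child_index) triple
        describing one offset field inside that table's data;
        hb.repack() then orders the objects so every offset fits
        its field.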
Harfbuzz repacker is faster and retain as much sub-table sharing as possible, see also: https://github.com/harfbuzz/harfbuzz/blob/main/docs/repacker.md The input format for hb.repack() method is explained here: https://github.com/harfbuzz/uharfbuzz/blob/main/src/uharfbuzz/_harfbuzz.pyx#L1149 """ internedTables = {} self._doneWriting(internedTables, shareExtension=True) tables = [] obj_list = [] done = {} objidx = 0 virtual_edges = [] self._gatherGraphForHarfbuzz(tables, obj_list, done, objidx, virtual_edges) # Gather all data in two passes: the absolute positions of all # subtable are needed before the actual data can be assembled. pos = 0 for table in tables: table.pos = pos pos = pos + table.getDataLength() data = [] for table in tables: tableData = table.getDataForHarfbuzz() data.append(tableData) if hasattr(hb, "repack_with_tag"): return hb.repack_with_tag(str(tableTag), data, obj_list) else: return hb.repack(data, obj_list) def getAllData(self, remove_duplicate=True): """Assemble all data, including all subtables.""" if remove_duplicate: internedTables = {} self._doneWriting(internedTables) tables = [] extTables = [] done = {} self._gatherTables(tables, extTables, done) tables.reverse() extTables.reverse() # Gather all data in two passes: the absolute positions of all # subtable are needed before the actual data can be assembled. pos = 0 for table in tables: table.pos = pos pos = pos + table.getDataLength() for table in extTables: table.pos = pos pos = pos + table.getDataLength() data = [] for table in tables: tableData = table.getData() data.append(tableData) for table in extTables: tableData = table.getData() data.append(tableData) return bytesjoin(data) # interface for gathering data, as used by table.compile() def getSubWriter(self): subwriter = self.__class__(self.localState, self.tableTag) subwriter.parent = ( self # because some subtables have idential values, we discard ) # the duplicates under the getAllData method. Hence some # subtable writers can have more than one parent writer. # But we just care about first one right now. 
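# (Sketch of the two-pass assembly performed by getAllData() above:
#  pass 1 walks the flattened list of writers assigning absolute
#  positions, pos(t0)=0, pos(t1)=len(t0), pos(t2)=pos(t1)+len(t1), ...;
#  pass 2 calls getData() on each writer, at which point every
#  subWriter.pos is known and an offset field is simply
#  item.subWriter.pos - self.pos.)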
return subwriter def writeValue(self, typecode, value): self.items.append(struct.pack(f">{typecode}", value)) def writeArray(self, typecode, values): a = array.array(typecode, values) if sys.byteorder != "big": a.byteswap() self.items.append(a.tobytes()) def writeInt8(self, value): assert -128 <= value < 128, value self.items.append(struct.pack(">b", value)) def writeInt8Array(self, values): self.writeArray("b", values) def writeShort(self, value): assert -32768 <= value < 32768, value self.items.append(struct.pack(">h", value)) def writeShortArray(self, values): self.writeArray("h", values) def writeLong(self, value): self.items.append(struct.pack(">i", value)) def writeLongArray(self, values): self.writeArray("i", values) def writeUInt8(self, value): assert 0 <= value < 256, value self.items.append(struct.pack(">B", value)) def writeUInt8Array(self, values): self.writeArray("B", values) def writeUShort(self, value): assert 0 <= value < 0x10000, value self.items.append(struct.pack(">H", value)) def writeUShortArray(self, values): self.writeArray("H", values) def writeULong(self, value): self.items.append(struct.pack(">I", value)) def writeULongArray(self, values): self.writeArray("I", values) def writeUInt24(self, value): assert 0 <= value < 0x1000000, value b = struct.pack(">L", value) self.items.append(b[1:]) def writeUInt24Array(self, values): for value in values: self.writeUInt24(value) def writeTag(self, tag): tag = Tag(tag).tobytes() assert len(tag) == 4, tag self.items.append(tag) def writeSubTable(self, subWriter, offsetSize): self.items.append(OffsetToWriter(subWriter, offsetSize)) def writeCountReference(self, table, name, size=2, value=None): ref = CountReference(table, name, size=size, value=value) self.items.append(ref) return ref def writeStruct(self, format, values): data = struct.pack(*(format,) + values) self.items.append(data) def writeData(self, data): self.items.append(data) def getOverflowErrorRecord(self, item): LookupListIndex = SubTableIndex = itemName = itemIndex = None if self.name == "LookupList": LookupListIndex = item.repeatIndex elif self.name == "Lookup": LookupListIndex = self.repeatIndex SubTableIndex = item.repeatIndex else: itemName = getattr(item, "name", "<none>") if hasattr(item, "repeatIndex"): itemIndex = item.repeatIndex if self.name == "SubTable": LookupListIndex = self.parent.repeatIndex SubTableIndex = self.repeatIndex elif self.name == "ExtSubTable": LookupListIndex = self.parent.parent.repeatIndex SubTableIndex = self.parent.repeatIndex else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 
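# For example (hypothetical writer names): if a Device table nested
# under SubTable > PairSet > PairValueRecord overflows, the loop
# below accumulates itemName as "PairSet.PairValueRecord.XAdvDevice"
# while climbing parents until it reaches the "SubTable" writer,
# from which LookupListIndex and SubTableIndex are then recovered.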
itemName = ".".join([self.name, itemName]) p1 = self.parent while p1 and p1.name not in ["ExtSubTable", "SubTable"]: itemName = ".".join([p1.name, itemName]) p1 = p1.parent if p1: if p1.name == "ExtSubTable": LookupListIndex = p1.parent.parent.repeatIndex SubTableIndex = p1.parent.repeatIndex else: LookupListIndex = p1.parent.repeatIndex SubTableIndex = p1.repeatIndex return OverflowErrorRecord( (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex) ) class CountReference(object): """A reference to a Count value, not a count of references.""" def __init__(self, table, name, size=None, value=None): self.table = table self.name = name self.size = size if value is not None: self.setValue(value) def setValue(self, value): table = self.table name = self.name if table[name] is None: table[name] = value else: assert table[name] == value, (name, table[name], value) def getValue(self): return self.table[self.name] def getCountData(self): v = self.table[self.name] if v is None: v = 0 return {1: packUInt8, 2: packUShort, 4: packULong}[self.size](v) def packUInt8(value): return struct.pack(">B", value) def packUShort(value): return struct.pack(">H", value) def packULong(value): assert 0 <= value < 0x100000000, value return struct.pack(">I", value) def packUInt24(value): assert 0 <= value < 0x1000000, value return struct.pack(">I", value)[1:] class BaseTable(object): """Generic base class for all OpenType (sub)tables.""" def __getattr__(self, attr): reader = self.__dict__.get("reader") if reader: del self.reader font = self.font del self.font self.decompile(reader, font) return getattr(self, attr) raise AttributeError(attr) def ensureDecompiled(self, recurse=False): reader = self.__dict__.get("reader") if reader: del self.reader font = self.font del self.font self.decompile(reader, font) if recurse: for subtable in self.iterSubTables(): subtable.value.ensureDecompiled(recurse) def __getstate__(self): # before copying/pickling 'lazy' objects, make a shallow copy of OTTableReader # https://github.com/fonttools/fonttools/issues/2965 if "reader" in self.__dict__: state = self.__dict__.copy() state["reader"] = self.__dict__["reader"].copy() return state return self.__dict__ @classmethod def getRecordSize(cls, reader): totalSize = 0 for conv in cls.converters: size = conv.getRecordSize(reader) if size is NotImplemented: return NotImplemented countValue = 1 if conv.repeat: if conv.repeat in reader: countValue = reader[conv.repeat] + conv.aux else: return NotImplemented totalSize += size * countValue return totalSize def getConverters(self): return self.converters def getConverterByName(self, name): return self.convertersByName[name] def populateDefaults(self, propagator=None): for conv in self.getConverters(): if conv.repeat: if not hasattr(self, conv.name): setattr(self, conv.name, []) countValue = len(getattr(self, conv.name)) - conv.aux try: count_conv = self.getConverterByName(conv.repeat) setattr(self, conv.repeat, countValue) except KeyError: # conv.repeat is a propagated count if propagator and conv.repeat in propagator: propagator[conv.repeat].setValue(countValue) else: if conv.aux and not eval(conv.aux, None, self.__dict__): continue if hasattr(self, conv.name): continue # Warn if it should NOT be present?! if hasattr(conv, "writeNullOffset"): setattr(self, conv.name, None) # Warn? # elif not conv.isCount: # # Warn? # pass if hasattr(conv, "DEFAULT"): # OptionalValue converters (e.g. 
VarIndex) setattr(self, conv.name, conv.DEFAULT) def decompile(self, reader, font): self.readFormat(reader) table = {} self.__rawTable = table # for debugging for conv in self.getConverters(): if conv.name == "SubTable": conv = conv.getConverter(reader.tableTag, table["LookupType"]) if conv.name == "ExtSubTable": conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"]) if conv.name == "FeatureParams": conv = conv.getConverter(reader["FeatureTag"]) if conv.name == "SubStruct": conv = conv.getConverter(reader.tableTag, table["MorphType"]) try: if conv.repeat: if isinstance(conv.repeat, int): countValue = conv.repeat elif conv.repeat in table: countValue = table[conv.repeat] else: # conv.repeat is a propagated count countValue = reader[conv.repeat] countValue += conv.aux table[conv.name] = conv.readArray(reader, font, table, countValue) else: if conv.aux and not eval(conv.aux, None, table): continue table[conv.name] = conv.read(reader, font, table) if conv.isPropagated: reader[conv.name] = table[conv.name] except Exception as e: name = conv.name e.args = e.args + (name,) raise if hasattr(self, "postRead"): self.postRead(table, font) else: self.__dict__.update(table) del self.__rawTable # succeeded, get rid of debugging info def compile(self, writer, font): self.ensureDecompiled() # TODO Following hack to be removed by rewriting how FormatSwitching tables # are handled. # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631 if hasattr(self, "preWrite"): deleteFormat = not hasattr(self, "Format") table = self.preWrite(font) deleteFormat = deleteFormat and hasattr(self, "Format") else: deleteFormat = False table = self.__dict__.copy() # some count references may have been initialized in a custom preWrite; we set # these in the writer's state beforehand (instead of sequentially) so they will # be propagated to all nested subtables even if the count appears in the current # table only *after* the offset to the subtable that it is counting. for conv in self.getConverters(): if conv.isCount and conv.isPropagated: value = table.get(conv.name) if isinstance(value, CountReference): writer[conv.name] = value if hasattr(self, "sortCoverageLast"): writer.sortCoverageLast = 1 if hasattr(self, "DontShare"): writer.DontShare = True if hasattr(self.__class__, "LookupType"): writer["LookupType"].setValue(self.__class__.LookupType) self.writeFormat(writer) for conv in self.getConverters(): value = table.get( conv.name ) # TODO Handle defaults instead of defaulting to None! if conv.repeat: if value is None: value = [] countValue = len(value) - conv.aux if isinstance(conv.repeat, int): assert len(value) == conv.repeat, "expected %d values, got %d" % ( conv.repeat, len(value), ) elif conv.repeat in table: CountReference(table, conv.repeat, value=countValue) else: # conv.repeat is a propagated count writer[conv.repeat].setValue(countValue) try: conv.writeArray(writer, font, table, value) except Exception as e: e.args = e.args + (conv.name + "[]",) raise elif conv.isCount: # Special-case Count values. # Assumption: a Count field will *always* precede # the actual array(s). # We need a default value, as it may be set later by a nested # table. We will later store it here. # We add a reference: by the time the data is assembled # the Count value will be filled in. # We ignore the current count value since it will be recomputed, # unless it's a CountReference that was already initialized in a custom preWrite. 
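# (Sketch of the deferred-count mechanism used below: the writer
#  emits a CountReference placeholder now, typically with value
#  None; a nested table later calls ref.setValue(n); and when the
#  data is finally assembled, ref.getCountData() packs n at the
#  recorded size, e.g. size=2 with n=3 yields b"\x00\x03".)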
if isinstance(value, CountReference): ref = value ref.size = conv.staticSize writer.writeData(ref) table[conv.name] = ref.getValue() else: ref = writer.writeCountReference(table, conv.name, conv.staticSize) table[conv.name] = None if conv.isPropagated: writer[conv.name] = ref elif conv.isLookupType: # We make sure that subtables have the same lookup type, # and that the type is the same as the one set on the # Lookup object, if any is set. if conv.name not in table: table[conv.name] = None ref = writer.writeCountReference( table, conv.name, conv.staticSize, table[conv.name] ) writer["LookupType"] = ref else: if conv.aux and not eval(conv.aux, None, table): continue try: conv.write(writer, font, table, value) except Exception as e: name = value.__class__.__name__ if value is not None else conv.name e.args = e.args + (name,) raise if conv.isPropagated: writer[conv.name] = value if deleteFormat: del self.Format def readFormat(self, reader): pass def writeFormat(self, writer): pass def toXML(self, xmlWriter, font, attrs=None, name=None): tableName = name if name else self.__class__.__name__ if attrs is None: attrs = [] if hasattr(self, "Format"): attrs = attrs + [("Format", self.Format)] xmlWriter.begintag(tableName, attrs) xmlWriter.newline() self.toXML2(xmlWriter, font) xmlWriter.endtag(tableName) xmlWriter.newline() def toXML2(self, xmlWriter, font): # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB). # This is because in TTX our parent writes our main tag, and in otBase.py we # do it ourselves. I think I'm getting schizophrenic... for conv in self.getConverters(): if conv.repeat: value = getattr(self, conv.name, []) for i in range(len(value)): item = value[i] conv.xmlWrite(xmlWriter, font, item, conv.name, [("index", i)]) else: if conv.aux and not eval(conv.aux, None, vars(self)): continue value = getattr( self, conv.name, None ) # TODO Handle defaults instead of defaulting to None! conv.xmlWrite(xmlWriter, font, value, conv.name, []) def fromXML(self, name, attrs, content, font): try: conv = self.getConverterByName(name) except KeyError: raise # XXX on KeyError, raise nice error value = conv.xmlRead(attrs, content, font) if conv.repeat: seq = getattr(self, conv.name, None) if seq is None: seq = [] setattr(self, conv.name, seq) seq.append(value) else: setattr(self, conv.name, value) def __ne__(self, other): result = self.__eq__(other) return result if result is NotImplemented else not result def __eq__(self, other): if type(self) != type(other): return NotImplemented self.ensureDecompiled() other.ensureDecompiled() return self.__dict__ == other.__dict__ class SubTableEntry(NamedTuple): """See BaseTable.iterSubTables()""" name: str value: "BaseTable" index: Optional[int] = None # index into given array, None for single values def iterSubTables(self) -> Iterator[SubTableEntry]: """Yield (name, value, index) namedtuples for all subtables of current table. A sub-table is an instance of BaseTable (or subclass thereof) that is a child of self, the current parent table. The tuples also contain the attribute name (str) of the of parent table to get a subtable, and optionally, for lists of subtables (i.e. attributes associated with a converter that has a 'repeat'), an index into the list containing the given subtable value. This method can be useful to traverse trees of otTables. 
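        Example (a sketch; which subtables exist depends on the
        font, and only direct children are yielded, so a full
        traversal would recurse):

            for entry in font["GPOS"].table.iterSubTables():
                print(entry.name, entry.index, type(entry.value).__name__)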
""" for conv in self.getConverters(): name = conv.name value = getattr(self, name, None) if value is None: continue if isinstance(value, BaseTable): yield self.SubTableEntry(name, value) elif isinstance(value, list): yield from ( self.SubTableEntry(name, v, index=i) for i, v in enumerate(value) if isinstance(v, BaseTable) ) # instance (not @class)method for consistency with FormatSwitchingBaseTable def getVariableAttrs(self): return getVariableAttrs(self.__class__) class FormatSwitchingBaseTable(BaseTable): """Minor specialization of BaseTable, for tables that have multiple formats, eg. CoverageFormat1 vs. CoverageFormat2.""" @classmethod def getRecordSize(cls, reader): return NotImplemented def getConverters(self): try: fmt = self.Format except AttributeError: # some FormatSwitchingBaseTables (e.g. Coverage) no longer have 'Format' # attribute after fully decompiled, only gain one in preWrite before being # recompiled. In the decompiled state, these hand-coded classes defined in # otTables.py lose their format-specific nature and gain more high-level # attributes that are not tied to converters. return [] return self.converters.get(self.Format, []) def getConverterByName(self, name): return self.convertersByName[self.Format][name] def readFormat(self, reader): self.Format = reader.readUShort() def writeFormat(self, writer): writer.writeUShort(self.Format) def toXML(self, xmlWriter, font, attrs=None, name=None): BaseTable.toXML(self, xmlWriter, font, attrs, name) def getVariableAttrs(self): return getVariableAttrs(self.__class__, self.Format) class UInt8FormatSwitchingBaseTable(FormatSwitchingBaseTable): def readFormat(self, reader): self.Format = reader.readUInt8() def writeFormat(self, writer): writer.writeUInt8(self.Format) formatSwitchingBaseTables = { "uint16": FormatSwitchingBaseTable, "uint8": UInt8FormatSwitchingBaseTable, } def getFormatSwitchingBaseTableClass(formatType): try: return formatSwitchingBaseTables[formatType] except KeyError: raise TypeError(f"Unsupported format type: {formatType!r}") # memoize since these are parsed from otData.py, thus stay constant @lru_cache() def getVariableAttrs(cls: BaseTable, fmt: Optional[int] = None) -> Tuple[str]: """Return sequence of variable table field names (can be empty). Attributes are deemed "variable" when their otData.py's description contain 'VarIndexBase + {offset}', e.g. COLRv1 PaintVar* tables. """ if not issubclass(cls, BaseTable): raise TypeError(cls) if issubclass(cls, FormatSwitchingBaseTable): if fmt is None: raise TypeError(f"'fmt' is required for format-switching {cls.__name__}") converters = cls.convertersByName[fmt] else: converters = cls.convertersByName # assume if no 'VarIndexBase' field is present, table has no variable fields if "VarIndexBase" not in converters: return () varAttrs = {} for name, conv in converters.items(): offset = conv.getVarIndexOffset() if offset is not None: varAttrs[name] = offset return tuple(sorted(varAttrs, key=varAttrs.__getitem__)) # # Support for ValueRecords # # This data type is so different from all other OpenType data types that # it requires quite a bit of code for itself. It even has special support # in OTTableReader and OTTableWriter... 
# valueRecordFormat = [ # Mask Name isDevice signed (0x0001, "XPlacement", 0, 1), (0x0002, "YPlacement", 0, 1), (0x0004, "XAdvance", 0, 1), (0x0008, "YAdvance", 0, 1), (0x0010, "XPlaDevice", 1, 0), (0x0020, "YPlaDevice", 1, 0), (0x0040, "XAdvDevice", 1, 0), (0x0080, "YAdvDevice", 1, 0), # reserved: (0x0100, "Reserved1", 0, 0), (0x0200, "Reserved2", 0, 0), (0x0400, "Reserved3", 0, 0), (0x0800, "Reserved4", 0, 0), (0x1000, "Reserved5", 0, 0), (0x2000, "Reserved6", 0, 0), (0x4000, "Reserved7", 0, 0), (0x8000, "Reserved8", 0, 0), ] def _buildDict(): d = {} for mask, name, isDevice, signed in valueRecordFormat: d[name] = mask, isDevice, signed return d valueRecordFormatDict = _buildDict() class ValueRecordFactory(object): """Given a format code, this object convert ValueRecords.""" def __init__(self, valueFormat): format = [] for mask, name, isDevice, signed in valueRecordFormat: if valueFormat & mask: format.append((name, isDevice, signed)) self.format = format def __len__(self): return len(self.format) def readValueRecord(self, reader, font): format = self.format if not format: return None valueRecord = ValueRecord() for name, isDevice, signed in format: if signed: value = reader.readShort() else: value = reader.readUShort() if isDevice: if value: from . import otTables subReader = reader.getSubReader(value) value = getattr(otTables, name)() value.decompile(subReader, font) else: value = None setattr(valueRecord, name, value) return valueRecord def writeValueRecord(self, writer, font, valueRecord): for name, isDevice, signed in self.format: value = getattr(valueRecord, name, 0) if isDevice: if value: subWriter = writer.getSubWriter() writer.writeSubTable(subWriter, offsetSize=2) value.compile(subWriter, font) else: writer.writeUShort(0) elif signed: writer.writeShort(value) else: writer.writeUShort(value) class ValueRecord(object): # see ValueRecordFactory def __init__(self, valueFormat=None, src=None): if valueFormat is not None: for mask, name, isDevice, signed in valueRecordFormat: if valueFormat & mask: setattr(self, name, None if isDevice else 0) if src is not None: for key, val in src.__dict__.items(): if not hasattr(self, key): continue setattr(self, key, val) elif src is not None: self.__dict__ = src.__dict__.copy() def getFormat(self): format = 0 for name in self.__dict__.keys(): format = format | valueRecordFormatDict[name][0] return format def getEffectiveFormat(self): format = 0 for name, value in self.__dict__.items(): if value: format = format | valueRecordFormatDict[name][0] return format def toXML(self, xmlWriter, font, valueName, attrs=None): if attrs is None: simpleItems = [] else: simpleItems = list(attrs) for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values if hasattr(self, name): simpleItems.append((name, getattr(self, name))) deviceItems = [] for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records if hasattr(self, name): device = getattr(self, name) if device is not None: deviceItems.append((name, device)) if deviceItems: xmlWriter.begintag(valueName, simpleItems) xmlWriter.newline() for name, deviceRecord in deviceItems: if deviceRecord is not None: deviceRecord.toXML(xmlWriter, font, name=name) xmlWriter.endtag(valueName) xmlWriter.newline() else: xmlWriter.simpletag(valueName, simpleItems) xmlWriter.newline() def fromXML(self, name, attrs, content, font): from . 
import otTables for k, v in attrs.items(): setattr(self, k, int(v)) for element in content: if not isinstance(element, tuple): continue name, attrs, content = element value = getattr(otTables, name)() for elem2 in content: if not isinstance(elem2, tuple): continue name2, attrs2, content2 = elem2 value.fromXML(name2, attrs2, content2, font) setattr(self, name, value) def __ne__(self, other): result = self.__eq__(other) return result if result is NotImplemented else not result def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ PKaZZZN`��&fontTools/ttLib/tables/otConverters.pyfrom fontTools.misc.fixedTools import ( fixedToFloat as fi2fl, floatToFixed as fl2fi, floatToFixedToStr as fl2str, strToFixedToFloat as str2fl, ensureVersionIsLong as fi2ve, versionToFixed as ve2fi, ) from fontTools.misc.roundTools import nearestMultipleShortestRepr, otRound from fontTools.misc.textTools import bytesjoin, tobytes, tostr, pad, safeEval from fontTools.ttLib import getSearchRange from .otBase import ( CountReference, FormatSwitchingBaseTable, OTTableReader, OTTableWriter, ValueRecordFactory, ) from .otTables import ( lookupTypes, AATStateTable, AATState, AATAction, ContextualMorphAction, LigatureMorphAction, InsertionMorphAction, MorxSubtable, ExtendMode as _ExtendMode, CompositeMode as _CompositeMode, NO_VARIATION_INDEX, ) from itertools import zip_longest from functools import partial import re import struct from typing import Optional import logging log = logging.getLogger(__name__) istuple = lambda t: isinstance(t, tuple) def buildConverters(tableSpec, tableNamespace): """Given a table spec from otData.py, build a converter object for each field of the table. This is called for each table in otData.py, and the results are assigned to the corresponding class in otTables.py.""" converters = [] convertersByName = {} for tp, name, repeat, aux, descr in tableSpec: tableName = name if name.startswith("ValueFormat"): assert tp == "uint16" converterClass = ValueFormat elif name.endswith("Count") or name in ("StructLength", "MorphType"): converterClass = { "uint8": ComputedUInt8, "uint16": ComputedUShort, "uint32": ComputedULong, }[tp] elif name == "SubTable": converterClass = SubTable elif name == "ExtSubTable": converterClass = ExtSubTable elif name == "SubStruct": converterClass = SubStruct elif name == "FeatureParams": converterClass = FeatureParams elif name in ("CIDGlyphMapping", "GlyphCIDMapping"): converterClass = StructWithLength else: if not tp in converterMapping and "(" not in tp: tableName = tp converterClass = Struct else: converterClass = eval(tp, tableNamespace, converterMapping) conv = converterClass(name, repeat, aux, description=descr) if conv.tableClass: # A "template" such as OffsetTo(AType) knowss the table class already tableClass = conv.tableClass elif tp in ("MortChain", "MortSubtable", "MorxChain"): tableClass = tableNamespace.get(tp) else: tableClass = tableNamespace.get(tableName) if not conv.tableClass: conv.tableClass = tableClass if name in ["SubTable", "ExtSubTable", "SubStruct"]: conv.lookupTypes = tableNamespace["lookupTypes"] # also create reverse mapping for t in conv.lookupTypes.values(): for cls in t.values(): convertersByName[cls.__name__] = Table(name, repeat, aux, cls) if name == "FeatureParams": conv.featureParamTypes = tableNamespace["featureParamTypes"] conv.defaultFeatureParams = tableNamespace["FeatureParams"] for cls in conv.featureParamTypes.values(): convertersByName[cls.__name__] = Table(name, 
repeat, aux, cls) converters.append(conv) assert name not in convertersByName, name convertersByName[name] = conv return converters, convertersByName class _MissingItem(tuple): __slots__ = () try: from collections import UserList except ImportError: from UserList import UserList class _LazyList(UserList): def __getslice__(self, i, j): return self.__getitem__(slice(i, j)) def __getitem__(self, k): if isinstance(k, slice): indices = range(*k.indices(len(self))) return [self[i] for i in indices] item = self.data[k] if isinstance(item, _MissingItem): self.reader.seek(self.pos + item[0] * self.recordSize) item = self.conv.read(self.reader, self.font, {}) self.data[k] = item return item def __add__(self, other): if isinstance(other, _LazyList): other = list(other) elif isinstance(other, list): pass else: return NotImplemented return list(self) + other def __radd__(self, other): if not isinstance(other, list): return NotImplemented return other + list(self) class BaseConverter(object): """Base class for converter objects. Apart from the constructor, this is an abstract class.""" def __init__(self, name, repeat, aux, tableClass=None, *, description=""): self.name = name self.repeat = repeat self.aux = aux if self.aux and not self.repeat: self.aux = compile(self.aux, "<string>", "eval") self.tableClass = tableClass self.isCount = name.endswith("Count") or name in [ "DesignAxisRecordSize", "ValueRecordSize", ] self.isLookupType = name.endswith("LookupType") or name == "MorphType" self.isPropagated = name in [ "ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "VarRegionCount", "MappingCount", "RegionAxisCount", "DesignAxisCount", "DesignAxisRecordSize", "AxisValueCount", "ValueRecordSize", "AxisCount", "BaseGlyphRecordCount", "LayerRecordCount", ] self.description = description def readArray(self, reader, font, tableDict, count): """Read an array of values from the reader.""" lazy = font.lazy and count > 8 if lazy: recordSize = self.getRecordSize(reader) if recordSize is NotImplemented: lazy = False if not lazy: l = [] for i in range(count): l.append(self.read(reader, font, tableDict)) return l else: l = _LazyList() l.reader = reader.copy() l.pos = l.reader.pos l.font = font l.conv = self l.recordSize = recordSize l.extend(_MissingItem([i]) for i in range(count)) reader.advance(count * recordSize) return l def getRecordSize(self, reader): if hasattr(self, "staticSize"): return self.staticSize return NotImplemented def read(self, reader, font, tableDict): """Read a value from the reader.""" raise NotImplementedError(self) def writeArray(self, writer, font, tableDict, values): try: for i, value in enumerate(values): self.write(writer, font, tableDict, value, i) except Exception as e: e.args = e.args + (i,) raise def write(self, writer, font, tableDict, value, repeatIndex=None): """Write a value to the writer.""" raise NotImplementedError(self) def xmlRead(self, attrs, content, font): """Read a value from XML.""" raise NotImplementedError(self) def xmlWrite(self, xmlWriter, font, value, name, attrs): """Write a value to XML.""" raise NotImplementedError(self) varIndexBasePlusOffsetRE = re.compile(r"VarIndexBase\s*\+\s*(\d+)") def getVarIndexOffset(self) -> Optional[int]: """If description has `VarIndexBase + {offset}`, return the offset else None.""" m = self.varIndexBasePlusOffsetRE.search(self.description) if not m: return None return int(m.group(1)) class SimpleValue(BaseConverter): @staticmethod def toString(value): return value @staticmethod def fromString(value): return value def 
xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.simpletag(name, attrs + [("value", self.toString(value))]) xmlWriter.newline() def xmlRead(self, attrs, content, font): return self.fromString(attrs["value"]) class OptionalValue(SimpleValue): DEFAULT = None def xmlWrite(self, xmlWriter, font, value, name, attrs): if value != self.DEFAULT: attrs.append(("value", self.toString(value))) xmlWriter.simpletag(name, attrs) xmlWriter.newline() def xmlRead(self, attrs, content, font): if "value" in attrs: return self.fromString(attrs["value"]) return self.DEFAULT class IntValue(SimpleValue): @staticmethod def fromString(value): return int(value, 0) class Long(IntValue): staticSize = 4 def read(self, reader, font, tableDict): return reader.readLong() def readArray(self, reader, font, tableDict, count): return reader.readLongArray(count) def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeLong(value) def writeArray(self, writer, font, tableDict, values): writer.writeLongArray(values) class ULong(IntValue): staticSize = 4 def read(self, reader, font, tableDict): return reader.readULong() def readArray(self, reader, font, tableDict, count): return reader.readULongArray(count) def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeULong(value) def writeArray(self, writer, font, tableDict, values): writer.writeULongArray(values) class Flags32(ULong): @staticmethod def toString(value): return "0x%08X" % value class VarIndex(OptionalValue, ULong): DEFAULT = NO_VARIATION_INDEX class Short(IntValue): staticSize = 2 def read(self, reader, font, tableDict): return reader.readShort() def readArray(self, reader, font, tableDict, count): return reader.readShortArray(count) def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeShort(value) def writeArray(self, writer, font, tableDict, values): writer.writeShortArray(values) class UShort(IntValue): staticSize = 2 def read(self, reader, font, tableDict): return reader.readUShort() def readArray(self, reader, font, tableDict, count): return reader.readUShortArray(count) def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUShort(value) def writeArray(self, writer, font, tableDict, values): writer.writeUShortArray(values) class Int8(IntValue): staticSize = 1 def read(self, reader, font, tableDict): return reader.readInt8() def readArray(self, reader, font, tableDict, count): return reader.readInt8Array(count) def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeInt8(value) def writeArray(self, writer, font, tableDict, values): writer.writeInt8Array(values) class UInt8(IntValue): staticSize = 1 def read(self, reader, font, tableDict): return reader.readUInt8() def readArray(self, reader, font, tableDict, count): return reader.readUInt8Array(count) def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUInt8(value) def writeArray(self, writer, font, tableDict, values): writer.writeUInt8Array(values) class UInt24(IntValue): staticSize = 3 def read(self, reader, font, tableDict): return reader.readUInt24() def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUInt24(value) class ComputedInt(IntValue): def xmlWrite(self, xmlWriter, font, value, name, attrs): if value is not None: xmlWriter.comment("%s=%s" % (name, value)) xmlWriter.newline() class ComputedUInt8(ComputedInt, UInt8): pass class ComputedUShort(ComputedInt, UShort): pass class ComputedULong(ComputedInt, ULong): pass class 
Tag(SimpleValue): staticSize = 4 def read(self, reader, font, tableDict): return reader.readTag() def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeTag(value) class GlyphID(SimpleValue): staticSize = 2 typecode = "H" def readArray(self, reader, font, tableDict, count): return font.getGlyphNameMany( reader.readArray(self.typecode, self.staticSize, count) ) def read(self, reader, font, tableDict): return font.getGlyphName(reader.readValue(self.typecode, self.staticSize)) def writeArray(self, writer, font, tableDict, values): writer.writeArray(self.typecode, font.getGlyphIDMany(values)) def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeValue(self.typecode, font.getGlyphID(value)) class GlyphID32(GlyphID): staticSize = 4 typecode = "L" class NameID(UShort): def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.simpletag(name, attrs + [("value", value)]) if font and value: nameTable = font.get("name") if nameTable: name = nameTable.getDebugName(value) xmlWriter.write(" ") if name: xmlWriter.comment(name) else: xmlWriter.comment("missing from name table") log.warning("name id %d missing from name table" % value) xmlWriter.newline() class STATFlags(UShort): def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.simpletag(name, attrs + [("value", value)]) flags = [] if value & 0x01: flags.append("OlderSiblingFontAttribute") if value & 0x02: flags.append("ElidableAxisValueName") if flags: xmlWriter.write(" ") xmlWriter.comment(" ".join(flags)) xmlWriter.newline() class FloatValue(SimpleValue): @staticmethod def fromString(value): return float(value) class DeciPoints(FloatValue): staticSize = 2 def read(self, reader, font, tableDict): return reader.readUShort() / 10 def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUShort(round(value * 10)) class BaseFixedValue(FloatValue): staticSize = NotImplemented precisionBits = NotImplemented readerMethod = NotImplemented writerMethod = NotImplemented def read(self, reader, font, tableDict): return self.fromInt(getattr(reader, self.readerMethod)()) def write(self, writer, font, tableDict, value, repeatIndex=None): getattr(writer, self.writerMethod)(self.toInt(value)) @classmethod def fromInt(cls, value): return fi2fl(value, cls.precisionBits) @classmethod def toInt(cls, value): return fl2fi(value, cls.precisionBits) @classmethod def fromString(cls, value): return str2fl(value, cls.precisionBits) @classmethod def toString(cls, value): return fl2str(value, cls.precisionBits) class Fixed(BaseFixedValue): staticSize = 4 precisionBits = 16 readerMethod = "readLong" writerMethod = "writeLong" class F2Dot14(BaseFixedValue): staticSize = 2 precisionBits = 14 readerMethod = "readShort" writerMethod = "writeShort" class Angle(F2Dot14): # angles are specified in degrees, and encoded as F2Dot14 fractions of half # circle: e.g. 1.0 => 180, -0.5 => -90, -2.0 => -360, etc. 
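# Worked examples of that mapping (an editorial sketch, not upstream text;
# the values follow directly from factor = 180 / (1 << 14), the smallest
# representable angle increment):
#
#   >>> Angle.toInt(180.0)      # 180 degrees is F2Dot14 value 1.0
#   16384
#   >>> Angle.fromInt(-8192)    # F2Dot14 value -0.5 is -90 degrees
#   -90.0
#   >>> Angle.fromString("90")  # quantized to a multiple of ~0.011 degrees
#   90.0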
bias = 0.0 factor = 1.0 / (1 << 14) * 180 # 0.010986328125 @classmethod def fromInt(cls, value): return (super().fromInt(value) + cls.bias) * 180 @classmethod def toInt(cls, value): return super().toInt((value / 180) - cls.bias) @classmethod def fromString(cls, value): # quantize to nearest multiples of minimum fixed-precision angle return otRound(float(value) / cls.factor) * cls.factor @classmethod def toString(cls, value): return nearestMultipleShortestRepr(value, cls.factor) class BiasedAngle(Angle): # A bias of 1.0 is used in the representation of start and end angles # of COLRv1 PaintSweepGradients to allow for encoding +360deg bias = 1.0 class Version(SimpleValue): staticSize = 4 def read(self, reader, font, tableDict): value = reader.readLong() return value def write(self, writer, font, tableDict, value, repeatIndex=None): value = fi2ve(value) writer.writeLong(value) @staticmethod def fromString(value): return ve2fi(value) @staticmethod def toString(value): return "0x%08x" % value @staticmethod def fromFloat(v): return fl2fi(v, 16) class Char64(SimpleValue): """An ASCII string with up to 64 characters. Unused character positions are filled with 0x00 bytes. Used in Apple AAT fonts in the `gcid` table. """ staticSize = 64 def read(self, reader, font, tableDict): data = reader.readData(self.staticSize) zeroPos = data.find(b"\0") if zeroPos >= 0: data = data[:zeroPos] s = tostr(data, encoding="ascii", errors="replace") if s != tostr(data, encoding="ascii", errors="ignore"): log.warning('replaced non-ASCII characters in "%s"' % s) return s def write(self, writer, font, tableDict, value, repeatIndex=None): data = tobytes(value, encoding="ascii", errors="replace") if data != tobytes(value, encoding="ascii", errors="ignore"): log.warning('replacing non-ASCII characters in "%s"' % value) if len(data) > self.staticSize: log.warning( 'truncating overlong "%s" to %d bytes' % (value, self.staticSize) ) data = (data + b"\0" * self.staticSize)[: self.staticSize] writer.writeData(data) class Struct(BaseConverter): def getRecordSize(self, reader): return self.tableClass and self.tableClass.getRecordSize(reader) def read(self, reader, font, tableDict): table = self.tableClass() table.decompile(reader, font) return table def write(self, writer, font, tableDict, value, repeatIndex=None): value.compile(writer, font) def xmlWrite(self, xmlWriter, font, value, name, attrs): if value is None: if attrs: # If there are attributes (probably index), then # don't drop this even if it's NULL. It will mess # up the array indices of the containing element. xmlWriter.simpletag(name, attrs + [("empty", 1)]) xmlWriter.newline() else: pass # NULL table, ignore else: value.toXML(xmlWriter, font, attrs, name=name) def xmlRead(self, attrs, content, font): if "empty" in attrs and safeEval(attrs["empty"]): return None table = self.tableClass() Format = attrs.get("Format") if Format is not None: table.Format = int(Format) noPostRead = not hasattr(table, "postRead") if noPostRead: # TODO Cache table.hasPropagated. 
cleanPropagation = False for conv in table.getConverters(): if conv.isPropagated: cleanPropagation = True if not hasattr(font, "_propagator"): font._propagator = {} propagator = font._propagator assert conv.name not in propagator, (conv.name, propagator) setattr(table, conv.name, None) propagator[conv.name] = CountReference(table.__dict__, conv.name) for element in content: if isinstance(element, tuple): name, attrs, content = element table.fromXML(name, attrs, content, font) else: pass table.populateDefaults(propagator=getattr(font, "_propagator", None)) if noPostRead: if cleanPropagation: for conv in table.getConverters(): if conv.isPropagated: propagator = font._propagator del propagator[conv.name] if not propagator: del font._propagator return table def __repr__(self): return "Struct of " + repr(self.tableClass) class StructWithLength(Struct): def read(self, reader, font, tableDict): pos = reader.pos table = self.tableClass() table.decompile(reader, font) reader.seek(pos + table.StructLength) return table def write(self, writer, font, tableDict, value, repeatIndex=None): for convIndex, conv in enumerate(value.getConverters()): if conv.name == "StructLength": break lengthIndex = len(writer.items) + convIndex if isinstance(value, FormatSwitchingBaseTable): lengthIndex += 1 # implicit Format field deadbeef = {1: 0xDE, 2: 0xDEAD, 4: 0xDEADBEEF}[conv.staticSize] before = writer.getDataLength() value.StructLength = deadbeef value.compile(writer, font) length = writer.getDataLength() - before lengthWriter = writer.getSubWriter() conv.write(lengthWriter, font, tableDict, length) assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"[: conv.staticSize] writer.items[lengthIndex] = lengthWriter.getAllData() class Table(Struct): staticSize = 2 def readOffset(self, reader): return reader.readUShort() def writeNullOffset(self, writer): writer.writeUShort(0) def read(self, reader, font, tableDict): offset = self.readOffset(reader) if offset == 0: return None table = self.tableClass() reader = reader.getSubReader(offset) if font.lazy: table.reader = reader table.font = font else: table.decompile(reader, font) return table def write(self, writer, font, tableDict, value, repeatIndex=None): if value is None: self.writeNullOffset(writer) else: subWriter = writer.getSubWriter() subWriter.name = self.name if repeatIndex is not None: subWriter.repeatIndex = repeatIndex writer.writeSubTable(subWriter, offsetSize=self.staticSize) value.compile(subWriter, font) class LTable(Table): staticSize = 4 def readOffset(self, reader): return reader.readULong() def writeNullOffset(self, writer): writer.writeULong(0) # Table pointed to by a 24-bit, 3-byte long offset class Table24(Table): staticSize = 3 def readOffset(self, reader): return reader.readUInt24() def writeNullOffset(self, writer): writer.writeUInt24(0) # TODO Clean / merge the SubTable and SubStruct class SubStruct(Struct): def getConverter(self, tableType, lookupType): tableClass = self.lookupTypes[tableType][lookupType] return self.__class__(self.name, self.repeat, self.aux, tableClass) def xmlWrite(self, xmlWriter, font, value, name, attrs): super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs) class SubTable(Table): def getConverter(self, tableType, lookupType): tableClass = self.lookupTypes[tableType][lookupType] return self.__class__(self.name, self.repeat, self.aux, tableClass) def xmlWrite(self, xmlWriter, font, value, name, attrs): super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs) class ExtSubTable(LTable, SubTable): 
def write(self, writer, font, tableDict, value, repeatIndex=None): writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer. Table.write(self, writer, font, tableDict, value, repeatIndex) class FeatureParams(Table): def getConverter(self, featureTag): tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams) return self.__class__(self.name, self.repeat, self.aux, tableClass) class ValueFormat(IntValue): staticSize = 2 def __init__(self, name, repeat, aux, tableClass=None, *, description=""): BaseConverter.__init__( self, name, repeat, aux, tableClass, description=description ) self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1") def read(self, reader, font, tableDict): format = reader.readUShort() reader[self.which] = ValueRecordFactory(format) return format def write(self, writer, font, tableDict, format, repeatIndex=None): writer.writeUShort(format) writer[self.which] = ValueRecordFactory(format) class ValueRecord(ValueFormat): def getRecordSize(self, reader): return 2 * len(reader[self.which]) def read(self, reader, font, tableDict): return reader[self.which].readValueRecord(reader, font) def write(self, writer, font, tableDict, value, repeatIndex=None): writer[self.which].writeValueRecord(writer, font, value) def xmlWrite(self, xmlWriter, font, value, name, attrs): if value is None: pass # NULL table, ignore else: value.toXML(xmlWriter, font, self.name, attrs) def xmlRead(self, attrs, content, font): from .otBase import ValueRecord value = ValueRecord() value.fromXML(None, attrs, content, font) return value class AATLookup(BaseConverter): BIN_SEARCH_HEADER_SIZE = 10 def __init__(self, name, repeat, aux, tableClass, *, description=""): BaseConverter.__init__( self, name, repeat, aux, tableClass, description=description ) if issubclass(self.tableClass, SimpleValue): self.converter = self.tableClass(name="Value", repeat=None, aux=None) else: self.converter = Table( name="Value", repeat=None, aux=None, tableClass=self.tableClass ) def read(self, reader, font, tableDict): format = reader.readUShort() if format == 0: return self.readFormat0(reader, font) elif format == 2: return self.readFormat2(reader, font) elif format == 4: return self.readFormat4(reader, font) elif format == 6: return self.readFormat6(reader, font) elif format == 8: return self.readFormat8(reader, font) else: assert False, "unsupported lookup format: %d" % format def write(self, writer, font, tableDict, value, repeatIndex=None): values = list( sorted([(font.getGlyphID(glyph), val) for glyph, val in value.items()]) ) # TODO: Also implement format 4. formats = list( sorted( filter( None, [ self.buildFormat0(writer, font, values), self.buildFormat2(writer, font, values), self.buildFormat6(writer, font, values), self.buildFormat8(writer, font, values), ], ) ) ) # We use the format ID as secondary sort key to make the output # deterministic when multiple formats have same encoded size. 
dataSize, lookupFormat, writeMethod = formats[0] pos = writer.getDataLength() writeMethod() actualSize = writer.getDataLength() - pos assert ( actualSize == dataSize ), "AATLookup format %d claimed to write %d bytes, but wrote %d" % ( lookupFormat, dataSize, actualSize, ) @staticmethod def writeBinSearchHeader(writer, numUnits, unitSize): writer.writeUShort(unitSize) writer.writeUShort(numUnits) searchRange, entrySelector, rangeShift = getSearchRange( n=numUnits, itemSize=unitSize ) writer.writeUShort(searchRange) writer.writeUShort(entrySelector) writer.writeUShort(rangeShift) def buildFormat0(self, writer, font, values): numGlyphs = len(font.getGlyphOrder()) if len(values) != numGlyphs: return None valueSize = self.converter.staticSize return ( 2 + numGlyphs * valueSize, 0, lambda: self.writeFormat0(writer, font, values), ) def writeFormat0(self, writer, font, values): writer.writeUShort(0) for glyphID_, value in values: self.converter.write( writer, font, tableDict=None, value=value, repeatIndex=None ) def buildFormat2(self, writer, font, values): segStart, segValue = values[0] segEnd = segStart segments = [] for glyphID, curValue in values[1:]: if glyphID != segEnd + 1 or curValue != segValue: segments.append((segStart, segEnd, segValue)) segStart = segEnd = glyphID segValue = curValue else: segEnd = glyphID segments.append((segStart, segEnd, segValue)) valueSize = self.converter.staticSize numUnits, unitSize = len(segments) + 1, valueSize + 4 return ( 2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize, 2, lambda: self.writeFormat2(writer, font, segments), ) def writeFormat2(self, writer, font, segments): writer.writeUShort(2) valueSize = self.converter.staticSize numUnits, unitSize = len(segments), valueSize + 4 self.writeBinSearchHeader(writer, numUnits, unitSize) for firstGlyph, lastGlyph, value in segments: writer.writeUShort(lastGlyph) writer.writeUShort(firstGlyph) self.converter.write( writer, font, tableDict=None, value=value, repeatIndex=None ) writer.writeUShort(0xFFFF) writer.writeUShort(0xFFFF) writer.writeData(b"\x00" * valueSize) def buildFormat6(self, writer, font, values): valueSize = self.converter.staticSize numUnits, unitSize = len(values), valueSize + 2 return ( 2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize, 6, lambda: self.writeFormat6(writer, font, values), ) def writeFormat6(self, writer, font, values): writer.writeUShort(6) valueSize = self.converter.staticSize numUnits, unitSize = len(values), valueSize + 2 self.writeBinSearchHeader(writer, numUnits, unitSize) for glyphID, value in values: writer.writeUShort(glyphID) self.converter.write( writer, font, tableDict=None, value=value, repeatIndex=None ) writer.writeUShort(0xFFFF) writer.writeData(b"\x00" * valueSize) def buildFormat8(self, writer, font, values): minGlyphID, maxGlyphID = values[0][0], values[-1][0] if len(values) != maxGlyphID - minGlyphID + 1: return None valueSize = self.converter.staticSize return ( 6 + len(values) * valueSize, 8, lambda: self.writeFormat8(writer, font, values), ) def writeFormat8(self, writer, font, values): firstGlyphID = values[0][0] writer.writeUShort(8) writer.writeUShort(firstGlyphID) writer.writeUShort(len(values)) for _, value in values: self.converter.write( writer, font, tableDict=None, value=value, repeatIndex=None ) def readFormat0(self, reader, font): numGlyphs = len(font.getGlyphOrder()) data = self.converter.readArray(reader, font, tableDict=None, count=numGlyphs) return {font.getGlyphName(k): value for k, value in enumerate(data)} def 
readFormat2(self, reader, font): mapping = {} pos = reader.pos - 2 # start of table is at UShort for format unitSize, numUnits = reader.readUShort(), reader.readUShort() assert unitSize >= 4 + self.converter.staticSize, unitSize for i in range(numUnits): reader.seek(pos + i * unitSize + 12) last = reader.readUShort() first = reader.readUShort() value = self.converter.read(reader, font, tableDict=None) if last != 0xFFFF: for k in range(first, last + 1): mapping[font.getGlyphName(k)] = value return mapping def readFormat4(self, reader, font): mapping = {} pos = reader.pos - 2 # start of table is at UShort for format unitSize = reader.readUShort() assert unitSize >= 6, unitSize for i in range(reader.readUShort()): reader.seek(pos + i * unitSize + 12) last = reader.readUShort() first = reader.readUShort() offset = reader.readUShort() if last != 0xFFFF: dataReader = reader.getSubReader(0) # relative to current position dataReader.seek(pos + offset) # relative to start of table data = self.converter.readArray( dataReader, font, tableDict=None, count=last - first + 1 ) for k, v in enumerate(data): mapping[font.getGlyphName(first + k)] = v return mapping def readFormat6(self, reader, font): mapping = {} pos = reader.pos - 2 # start of table is at UShort for format unitSize = reader.readUShort() assert unitSize >= 2 + self.converter.staticSize, unitSize for i in range(reader.readUShort()): reader.seek(pos + i * unitSize + 12) glyphID = reader.readUShort() value = self.converter.read(reader, font, tableDict=None) if glyphID != 0xFFFF: mapping[font.getGlyphName(glyphID)] = value return mapping def readFormat8(self, reader, font): first = reader.readUShort() count = reader.readUShort() data = self.converter.readArray(reader, font, tableDict=None, count=count) return {font.getGlyphName(first + k): value for (k, value) in enumerate(data)} def xmlRead(self, attrs, content, font): value = {} for element in content: if isinstance(element, tuple): name, a, eltContent = element if name == "Lookup": value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font) return value def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.begintag(name, attrs) xmlWriter.newline() for glyph, value in sorted(value.items()): self.converter.xmlWrite( xmlWriter, font, value=value, name="Lookup", attrs=[("glyph", glyph)] ) xmlWriter.endtag(name) xmlWriter.newline() # The AAT 'ankr' table has an unusual structure: An offset to an AATLookup # followed by an offset to a glyph data table. Unusually, the offsets in # the AATLookup are not relative to the beginning of the 'ankr' table, # but relative to the glyph data table. So, to find the anchor data for a # glyph, one needs to add the offset to the data table to the offset found # in the AATLookup, and then use the sum of these two offsets to find the # actual data.
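# A worked example of that double indirection (an editorial sketch with
# made-up numbers, not upstream text): if the 'ankr' header stores
# dataOffset = 0x100 and the AATLookup maps a glyph to the value 0x20,
# that glyph's anchor data starts at 0x100 + 0x20 = 0x120 from the
# beginning of the table, which is exactly what read() below computes via
# reader.getSubReader(offset + dataOffset).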
class AATLookupWithDataOffset(BaseConverter): def read(self, reader, font, tableDict): lookupOffset = reader.readULong() dataOffset = reader.readULong() lookupReader = reader.getSubReader(lookupOffset) lookup = AATLookup("DataOffsets", None, None, UShort) offsets = lookup.read(lookupReader, font, tableDict) result = {} for glyph, offset in offsets.items(): dataReader = reader.getSubReader(offset + dataOffset) item = self.tableClass() item.decompile(dataReader, font) result[glyph] = item return result def write(self, writer, font, tableDict, value, repeatIndex=None): # We do not work with OTTableWriter sub-writers because # the offsets in our AATLookup are relative to our data # table, for which we need to provide an offset value itself. # It might have been possible to somehow make a kludge for # performing this indirect offset computation directly inside # OTTableWriter. But this would have made the internal logic # of OTTableWriter even more complex than it already is, # so we decided to roll our own offset computation for the # contents of the AATLookup and associated data table. offsetByGlyph, offsetByData, dataLen = {}, {}, 0 compiledData = [] for glyph in sorted(value, key=font.getGlyphID): subWriter = OTTableWriter() value[glyph].compile(subWriter, font) data = subWriter.getAllData() offset = offsetByData.get(data, None) if offset == None: offset = dataLen dataLen = dataLen + len(data) offsetByData[data] = offset compiledData.append(data) offsetByGlyph[glyph] = offset # For calculating the offsets to our AATLookup and data table, # we can use the regular OTTableWriter infrastructure. lookupWriter = writer.getSubWriter() lookup = AATLookup("DataOffsets", None, None, UShort) lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None) dataWriter = writer.getSubWriter() writer.writeSubTable(lookupWriter, offsetSize=4) writer.writeSubTable(dataWriter, offsetSize=4) for d in compiledData: dataWriter.writeData(d) def xmlRead(self, attrs, content, font): lookup = AATLookup("DataOffsets", None, None, self.tableClass) return lookup.xmlRead(attrs, content, font) def xmlWrite(self, xmlWriter, font, value, name, attrs): lookup = AATLookup("DataOffsets", None, None, self.tableClass) lookup.xmlWrite(xmlWriter, font, value, name, attrs) class MorxSubtableConverter(BaseConverter): _PROCESSING_ORDERS = { # bits 30 and 28 of morx.CoverageFlags; see morx spec (False, False): "LayoutOrder", (True, False): "ReversedLayoutOrder", (False, True): "LogicalOrder", (True, True): "ReversedLogicalOrder", } _PROCESSING_ORDERS_REVERSED = {val: key for key, val in _PROCESSING_ORDERS.items()} def __init__(self, name, repeat, aux, tableClass=None, *, description=""): BaseConverter.__init__( self, name, repeat, aux, tableClass, description=description ) def _setTextDirectionFromCoverageFlags(self, flags, subtable): if (flags & 0x20) != 0: subtable.TextDirection = "Any" elif (flags & 0x80) != 0: subtable.TextDirection = "Vertical" else: subtable.TextDirection = "Horizontal" def read(self, reader, font, tableDict): pos = reader.pos m = MorxSubtable() m.StructLength = reader.readULong() flags = reader.readUInt8() orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0) m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey] self._setTextDirectionFromCoverageFlags(flags, m) m.Reserved = reader.readUShort() m.Reserved |= (flags & 0xF) << 16 m.MorphType = reader.readUInt8() m.SubFeatureFlags = reader.readULong() tableClass = lookupTypes["morx"].get(m.MorphType) if tableClass is None: assert False, "unsupported 'morx' lookup 
type %s" % m.MorphType # To decode AAT ligatures, we need to know the subtable size. # The easiest way to pass this along is to create a new reader # that works on just the subtable as its data. headerLength = reader.pos - pos data = reader.data[reader.pos : reader.pos + m.StructLength - headerLength] assert len(data) == m.StructLength - headerLength subReader = OTTableReader(data=data, tableTag=reader.tableTag) m.SubStruct = tableClass() m.SubStruct.decompile(subReader, font) reader.seek(pos + m.StructLength) return m def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.begintag(name, attrs) xmlWriter.newline() xmlWriter.comment("StructLength=%d" % value.StructLength) xmlWriter.newline() xmlWriter.simpletag("TextDirection", value=value.TextDirection) xmlWriter.newline() xmlWriter.simpletag("ProcessingOrder", value=value.ProcessingOrder) xmlWriter.newline() if value.Reserved != 0: xmlWriter.simpletag("Reserved", value="0x%04x" % value.Reserved) xmlWriter.newline() xmlWriter.comment("MorphType=%d" % value.MorphType) xmlWriter.newline() xmlWriter.simpletag("SubFeatureFlags", value="0x%08x" % value.SubFeatureFlags) xmlWriter.newline() value.SubStruct.toXML(xmlWriter, font) xmlWriter.endtag(name) xmlWriter.newline() def xmlRead(self, attrs, content, font): m = MorxSubtable() covFlags = 0 m.Reserved = 0 for eltName, eltAttrs, eltContent in filter(istuple, content): if eltName == "CoverageFlags": # Only in XML from old versions of fonttools. covFlags = safeEval(eltAttrs["value"]) orderKey = ((covFlags & 0x40) != 0, (covFlags & 0x10) != 0) m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey] self._setTextDirectionFromCoverageFlags(covFlags, m) elif eltName == "ProcessingOrder": m.ProcessingOrder = eltAttrs["value"] assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, ( "unknown ProcessingOrder: %s" % m.ProcessingOrder ) elif eltName == "TextDirection": m.TextDirection = eltAttrs["value"] assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, ( "unknown TextDirection %s" % m.TextDirection ) elif eltName == "Reserved": m.Reserved = safeEval(eltAttrs["value"]) elif eltName == "SubFeatureFlags": m.SubFeatureFlags = safeEval(eltAttrs["value"]) elif eltName.endswith("Morph"): m.fromXML(eltName, eltAttrs, eltContent, font) else: assert False, eltName m.Reserved = (covFlags & 0xF) << 16 | m.Reserved return m def write(self, writer, font, tableDict, value, repeatIndex=None): covFlags = (value.Reserved & 0x000F0000) >> 16 reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[ value.ProcessingOrder ] covFlags |= 0x80 if value.TextDirection == "Vertical" else 0 covFlags |= 0x40 if reverseOrder else 0 covFlags |= 0x20 if value.TextDirection == "Any" else 0 covFlags |= 0x10 if logicalOrder else 0 value.CoverageFlags = covFlags lengthIndex = len(writer.items) before = writer.getDataLength() value.StructLength = 0xDEADBEEF # The high nibble of value.Reserved is actuallly encoded # into coverageFlags, so we need to clear it here. origReserved = value.Reserved # including high nibble value.Reserved = value.Reserved & 0xFFFF # without high nibble value.compile(writer, font) value.Reserved = origReserved # restore original value assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef" length = writer.getDataLength() - before writer.items[lengthIndex] = struct.pack(">L", length) # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6Tables.html#ExtendedStateHeader # TODO: Untangle the implementation of the various lookup-specific formats. 
class STXHeader(BaseConverter): def __init__(self, name, repeat, aux, tableClass, *, description=""): BaseConverter.__init__( self, name, repeat, aux, tableClass, description=description ) assert issubclass(self.tableClass, AATAction) self.classLookup = AATLookup("GlyphClasses", None, None, UShort) if issubclass(self.tableClass, ContextualMorphAction): self.perGlyphLookup = AATLookup("PerGlyphLookup", None, None, GlyphID) else: self.perGlyphLookup = None def read(self, reader, font, tableDict): table = AATStateTable() pos = reader.pos classTableReader = reader.getSubReader(0) stateArrayReader = reader.getSubReader(0) entryTableReader = reader.getSubReader(0) actionReader = None ligaturesReader = None table.GlyphClassCount = reader.readULong() classTableReader.seek(pos + reader.readULong()) stateArrayReader.seek(pos + reader.readULong()) entryTableReader.seek(pos + reader.readULong()) if self.perGlyphLookup is not None: perGlyphTableReader = reader.getSubReader(0) perGlyphTableReader.seek(pos + reader.readULong()) if issubclass(self.tableClass, LigatureMorphAction): actionReader = reader.getSubReader(0) actionReader.seek(pos + reader.readULong()) ligComponentReader = reader.getSubReader(0) ligComponentReader.seek(pos + reader.readULong()) ligaturesReader = reader.getSubReader(0) ligaturesReader.seek(pos + reader.readULong()) numLigComponents = (ligaturesReader.pos - ligComponentReader.pos) // 2 assert numLigComponents >= 0 table.LigComponents = ligComponentReader.readUShortArray(numLigComponents) table.Ligatures = self._readLigatures(ligaturesReader, font) elif issubclass(self.tableClass, InsertionMorphAction): actionReader = reader.getSubReader(0) actionReader.seek(pos + reader.readULong()) table.GlyphClasses = self.classLookup.read(classTableReader, font, tableDict) numStates = int( (entryTableReader.pos - stateArrayReader.pos) / (table.GlyphClassCount * 2) ) for stateIndex in range(numStates): state = AATState() table.States.append(state) for glyphClass in range(table.GlyphClassCount): entryIndex = stateArrayReader.readUShort() state.Transitions[glyphClass] = self._readTransition( entryTableReader, entryIndex, font, actionReader ) if self.perGlyphLookup is not None: table.PerGlyphLookups = self._readPerGlyphLookups( table, perGlyphTableReader, font ) return table def _readTransition(self, reader, entryIndex, font, actionReader): transition = self.tableClass() entryReader = reader.getSubReader( reader.pos + entryIndex * transition.staticSize ) transition.decompile(entryReader, font, actionReader) return transition def _readLigatures(self, reader, font): limit = len(reader.data) numLigatureGlyphs = (limit - reader.pos) // 2 return font.getGlyphNameMany(reader.readUShortArray(numLigatureGlyphs)) def _countPerGlyphLookups(self, table): # Somewhat annoyingly, the morx table does not encode # the size of the per-glyph table. So we need to find # the maximum value that MorphActions use as index # into this table. 
numLookups = 0 for state in table.States: for t in state.Transitions.values(): if isinstance(t, ContextualMorphAction): if t.MarkIndex != 0xFFFF: numLookups = max(numLookups, t.MarkIndex + 1) if t.CurrentIndex != 0xFFFF: numLookups = max(numLookups, t.CurrentIndex + 1) return numLookups def _readPerGlyphLookups(self, table, reader, font): pos = reader.pos lookups = [] for _ in range(self._countPerGlyphLookups(table)): lookupReader = reader.getSubReader(0) lookupReader.seek(pos + reader.readULong()) lookups.append(self.perGlyphLookup.read(lookupReader, font, {})) return lookups def write(self, writer, font, tableDict, value, repeatIndex=None): glyphClassWriter = OTTableWriter() self.classLookup.write( glyphClassWriter, font, tableDict, value.GlyphClasses, repeatIndex=None ) glyphClassData = pad(glyphClassWriter.getAllData(), 2) glyphClassCount = max(value.GlyphClasses.values()) + 1 glyphClassTableOffset = 16 # size of STXHeader if self.perGlyphLookup is not None: glyphClassTableOffset += 4 glyphClassTableOffset += self.tableClass.actionHeaderSize actionData, actionIndex = self.tableClass.compileActions(font, value.States) stateArrayData, entryTableData = self._compileStates( font, value.States, glyphClassCount, actionIndex ) stateArrayOffset = glyphClassTableOffset + len(glyphClassData) entryTableOffset = stateArrayOffset + len(stateArrayData) perGlyphOffset = entryTableOffset + len(entryTableData) perGlyphData = pad(self._compilePerGlyphLookups(value, font), 4) if actionData is not None: actionOffset = entryTableOffset + len(entryTableData) else: actionOffset = None ligaturesOffset, ligComponentsOffset = None, None ligComponentsData = self._compileLigComponents(value, font) ligaturesData = self._compileLigatures(value, font) if ligComponentsData is not None: assert len(perGlyphData) == 0 ligComponentsOffset = actionOffset + len(actionData) ligaturesOffset = ligComponentsOffset + len(ligComponentsData) writer.writeULong(glyphClassCount) writer.writeULong(glyphClassTableOffset) writer.writeULong(stateArrayOffset) writer.writeULong(entryTableOffset) if self.perGlyphLookup is not None: writer.writeULong(perGlyphOffset) if actionOffset is not None: writer.writeULong(actionOffset) if ligComponentsOffset is not None: writer.writeULong(ligComponentsOffset) writer.writeULong(ligaturesOffset) writer.writeData(glyphClassData) writer.writeData(stateArrayData) writer.writeData(entryTableData) writer.writeData(perGlyphData) if actionData is not None: writer.writeData(actionData) if ligComponentsData is not None: writer.writeData(ligComponentsData) if ligaturesData is not None: writer.writeData(ligaturesData) def _compileStates(self, font, states, glyphClassCount, actionIndex): stateArrayWriter = OTTableWriter() entries, entryIDs = [], {} for state in states: for glyphClass in range(glyphClassCount): transition = state.Transitions[glyphClass] entryWriter = OTTableWriter() transition.compile(entryWriter, font, actionIndex) entryData = entryWriter.getAllData() assert ( len(entryData) == transition.staticSize ), "%s has staticSize %d, " "but actually wrote %d bytes" % ( repr(transition), transition.staticSize, len(entryData), ) entryIndex = entryIDs.get(entryData) if entryIndex is None: entryIndex = len(entries) entryIDs[entryData] = entryIndex entries.append(entryData) stateArrayWriter.writeUShort(entryIndex) stateArrayData = pad(stateArrayWriter.getAllData(), 4) entryTableData = pad(bytesjoin(entries), 4) return stateArrayData, entryTableData def _compilePerGlyphLookups(self, table, font): if 
self.perGlyphLookup is None: return b"" numLookups = self._countPerGlyphLookups(table) assert len(table.PerGlyphLookups) == numLookups, ( "len(AATStateTable.PerGlyphLookups) is %d, " "but the actions inside the table refer to %d" % (len(table.PerGlyphLookups), numLookups) ) writer = OTTableWriter() for lookup in table.PerGlyphLookups: lookupWriter = writer.getSubWriter() self.perGlyphLookup.write(lookupWriter, font, {}, lookup, None) writer.writeSubTable(lookupWriter, offsetSize=4) return writer.getAllData() def _compileLigComponents(self, table, font): if not hasattr(table, "LigComponents"): return None writer = OTTableWriter() for component in table.LigComponents: writer.writeUShort(component) return writer.getAllData() def _compileLigatures(self, table, font): if not hasattr(table, "Ligatures"): return None writer = OTTableWriter() for glyphName in table.Ligatures: writer.writeUShort(font.getGlyphID(glyphName)) return writer.getAllData() def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.begintag(name, attrs) xmlWriter.newline() xmlWriter.comment("GlyphClassCount=%s" % value.GlyphClassCount) xmlWriter.newline() for g, klass in sorted(value.GlyphClasses.items()): xmlWriter.simpletag("GlyphClass", glyph=g, value=klass) xmlWriter.newline() for stateIndex, state in enumerate(value.States): xmlWriter.begintag("State", index=stateIndex) xmlWriter.newline() for glyphClass, trans in sorted(state.Transitions.items()): trans.toXML( xmlWriter, font=font, attrs={"onGlyphClass": glyphClass}, name="Transition", ) xmlWriter.endtag("State") xmlWriter.newline() for i, lookup in enumerate(value.PerGlyphLookups): xmlWriter.begintag("PerGlyphLookup", index=i) xmlWriter.newline() for glyph, val in sorted(lookup.items()): xmlWriter.simpletag("Lookup", glyph=glyph, value=val) xmlWriter.newline() xmlWriter.endtag("PerGlyphLookup") xmlWriter.newline() if hasattr(value, "LigComponents"): xmlWriter.begintag("LigComponents") xmlWriter.newline() for i, val in enumerate(getattr(value, "LigComponents")): xmlWriter.simpletag("LigComponent", index=i, value=val) xmlWriter.newline() xmlWriter.endtag("LigComponents") xmlWriter.newline() self._xmlWriteLigatures(xmlWriter, font, value, name, attrs) xmlWriter.endtag(name) xmlWriter.newline() def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs): if not hasattr(value, "Ligatures"): return xmlWriter.begintag("Ligatures") xmlWriter.newline() for i, g in enumerate(getattr(value, "Ligatures")): xmlWriter.simpletag("Ligature", index=i, glyph=g) xmlWriter.newline() xmlWriter.endtag("Ligatures") xmlWriter.newline() def xmlRead(self, attrs, content, font): table = AATStateTable() for eltName, eltAttrs, eltContent in filter(istuple, content): if eltName == "GlyphClass": glyph = eltAttrs["glyph"] value = eltAttrs["value"] table.GlyphClasses[glyph] = safeEval(value) elif eltName == "State": state = self._xmlReadState(eltAttrs, eltContent, font) table.States.append(state) elif eltName == "PerGlyphLookup": lookup = self.perGlyphLookup.xmlRead(eltAttrs, eltContent, font) table.PerGlyphLookups.append(lookup) elif eltName == "LigComponents": table.LigComponents = self._xmlReadLigComponents( eltAttrs, eltContent, font ) elif eltName == "Ligatures": table.Ligatures = self._xmlReadLigatures(eltAttrs, eltContent, font) table.GlyphClassCount = max(table.GlyphClasses.values()) + 1 return table def _xmlReadState(self, attrs, content, font): state = AATState() for eltName, eltAttrs, eltContent in filter(istuple, content): if eltName == "Transition": glyphClass = 
safeEval(eltAttrs["onGlyphClass"]) transition = self.tableClass() transition.fromXML(eltName, eltAttrs, eltContent, font) state.Transitions[glyphClass] = transition return state def _xmlReadLigComponents(self, attrs, content, font): ligComponents = [] for eltName, eltAttrs, _eltContent in filter(istuple, content): if eltName == "LigComponent": ligComponents.append(safeEval(eltAttrs["value"])) return ligComponents def _xmlReadLigatures(self, attrs, content, font): ligs = [] for eltName, eltAttrs, _eltContent in filter(istuple, content): if eltName == "Ligature": ligs.append(eltAttrs["glyph"]) return ligs class CIDGlyphMap(BaseConverter): def read(self, reader, font, tableDict): numCIDs = reader.readUShort() result = {} for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)): if glyphID != 0xFFFF: result[cid] = font.getGlyphName(glyphID) return result def write(self, writer, font, tableDict, value, repeatIndex=None): items = {cid: font.getGlyphID(glyph) for cid, glyph in value.items()} count = max(items) + 1 if items else 0 writer.writeUShort(count) for cid in range(count): writer.writeUShort(items.get(cid, 0xFFFF)) def xmlRead(self, attrs, content, font): result = {} for eName, eAttrs, _eContent in filter(istuple, content): if eName == "CID": result[safeEval(eAttrs["cid"])] = eAttrs["glyph"].strip() return result def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.begintag(name, attrs) xmlWriter.newline() for cid, glyph in sorted(value.items()): if glyph is not None and glyph != 0xFFFF: xmlWriter.simpletag("CID", cid=cid, glyph=glyph) xmlWriter.newline() xmlWriter.endtag(name) xmlWriter.newline() class GlyphCIDMap(BaseConverter): def read(self, reader, font, tableDict): glyphOrder = font.getGlyphOrder() count = reader.readUShort() cids = reader.readUShortArray(count) if count > len(glyphOrder): log.warning( "GlyphCIDMap has %d elements, " "but the font has only %d glyphs; " "ignoring the rest" % (count, len(glyphOrder)) ) result = {} for glyphID in range(min(len(cids), len(glyphOrder))): cid = cids[glyphID] if cid != 0xFFFF: result[glyphOrder[glyphID]] = cid return result def write(self, writer, font, tableDict, value, repeatIndex=None): items = { font.getGlyphID(g): cid for g, cid in value.items() if cid is not None and cid != 0xFFFF } count = max(items) + 1 if items else 0 writer.writeUShort(count) for glyphID in range(count): writer.writeUShort(items.get(glyphID, 0xFFFF)) def xmlRead(self, attrs, content, font): result = {} for eName, eAttrs, _eContent in filter(istuple, content): if eName == "CID": result[eAttrs["glyph"]] = safeEval(eAttrs["value"]) return result def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.begintag(name, attrs) xmlWriter.newline() for glyph, cid in sorted(value.items()): if cid is not None and cid != 0xFFFF: xmlWriter.simpletag("CID", glyph=glyph, value=cid) xmlWriter.newline() xmlWriter.endtag(name) xmlWriter.newline() class DeltaValue(BaseConverter): def read(self, reader, font, tableDict): StartSize = tableDict["StartSize"] EndSize = tableDict["EndSize"] DeltaFormat = tableDict["DeltaFormat"] assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" nItems = EndSize - StartSize + 1 nBits = 1 << DeltaFormat minusOffset = 1 << nBits mask = (1 << nBits) - 1 signMask = 1 << (nBits - 1) DeltaValue = [] tmp, shift = 0, 0 for i in range(nItems): if shift == 0: tmp, shift = reader.readUShort(), 16 shift = shift - nBits value = (tmp >> shift) & mask if value & signMask: value = value - minusOffset DeltaValue.append(value) return 
DeltaValue def write(self, writer, font, tableDict, value, repeatIndex=None): StartSize = tableDict["StartSize"] EndSize = tableDict["EndSize"] DeltaFormat = tableDict["DeltaFormat"] DeltaValue = value assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" nItems = EndSize - StartSize + 1 nBits = 1 << DeltaFormat assert len(DeltaValue) == nItems mask = (1 << nBits) - 1 tmp, shift = 0, 16 for value in DeltaValue: shift = shift - nBits tmp = tmp | ((value & mask) << shift) if shift == 0: writer.writeUShort(tmp) tmp, shift = 0, 16 if shift != 16: writer.writeUShort(tmp) def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.simpletag(name, attrs + [("value", value)]) xmlWriter.newline() def xmlRead(self, attrs, content, font): return safeEval(attrs["value"]) class VarIdxMapValue(BaseConverter): def read(self, reader, font, tableDict): fmt = tableDict["EntryFormat"] nItems = tableDict["MappingCount"] innerBits = 1 + (fmt & 0x000F) innerMask = (1 << innerBits) - 1 outerMask = 0xFFFFFFFF - innerMask outerShift = 16 - innerBits entrySize = 1 + ((fmt & 0x0030) >> 4) readArray = { 1: reader.readUInt8Array, 2: reader.readUShortArray, 3: reader.readUInt24Array, 4: reader.readULongArray, }[entrySize] return [ (((raw & outerMask) << outerShift) | (raw & innerMask)) for raw in readArray(nItems) ] def write(self, writer, font, tableDict, value, repeatIndex=None): fmt = tableDict["EntryFormat"] mapping = value writer["MappingCount"].setValue(len(mapping)) innerBits = 1 + (fmt & 0x000F) innerMask = (1 << innerBits) - 1 outerShift = 16 - innerBits entrySize = 1 + ((fmt & 0x0030) >> 4) writeArray = { 1: writer.writeUInt8Array, 2: writer.writeUShortArray, 3: writer.writeUInt24Array, 4: writer.writeULongArray, }[entrySize] writeArray( [ (((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask)) for idx in mapping ] ) class VarDataValue(BaseConverter): def read(self, reader, font, tableDict): values = [] regionCount = tableDict["VarRegionCount"] wordCount = tableDict["NumShorts"] # https://github.com/fonttools/fonttools/issues/2279 longWords = bool(wordCount & 0x8000) wordCount = wordCount & 0x7FFF if longWords: readBigArray, readSmallArray = reader.readLongArray, reader.readShortArray else: readBigArray, readSmallArray = reader.readShortArray, reader.readInt8Array n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount) values.extend(readBigArray(n1)) values.extend(readSmallArray(n2 - n1)) if n2 > regionCount: # Padding del values[regionCount:] return values def write(self, writer, font, tableDict, values, repeatIndex=None): regionCount = tableDict["VarRegionCount"] wordCount = tableDict["NumShorts"] # https://github.com/fonttools/fonttools/issues/2279 longWords = bool(wordCount & 0x8000) wordCount = wordCount & 0x7FFF (writeBigArray, writeSmallArray) = { False: (writer.writeShortArray, writer.writeInt8Array), True: (writer.writeLongArray, writer.writeShortArray), }[longWords] n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount) writeBigArray(values[:n1]) writeSmallArray(values[n1:regionCount]) if n2 > regionCount: # Padding; use the locally bound small-array writer (the writer object itself has no writeSmallArray method) writeSmallArray([0] * (n2 - regionCount)) def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.simpletag(name, attrs + [("value", value)]) xmlWriter.newline() def xmlRead(self, attrs, content, font): return safeEval(attrs["value"]) class LookupFlag(UShort): def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.simpletag(name, attrs + [("value", value)]) flags = [] if value & 0x01: flags.append("rightToLeft") if value & 0x02:
flags.append("ignoreBaseGlyphs") if value & 0x04: flags.append("ignoreLigatures") if value & 0x08: flags.append("ignoreMarks") if value & 0x10: flags.append("useMarkFilteringSet") if value & 0xFF00: flags.append("markAttachmentType[%i]" % (value >> 8)) if flags: xmlWriter.comment(" ".join(flags)) xmlWriter.newline() class _UInt8Enum(UInt8): enumClass = NotImplemented def read(self, reader, font, tableDict): return self.enumClass(super().read(reader, font, tableDict)) @classmethod def fromString(cls, value): return getattr(cls.enumClass, value.upper()) @classmethod def toString(cls, value): return cls.enumClass(value).name.lower() class ExtendMode(_UInt8Enum): enumClass = _ExtendMode class CompositeMode(_UInt8Enum): enumClass = _CompositeMode converterMapping = { # type class "int8": Int8, "int16": Short, "uint8": UInt8, "uint16": UShort, "uint24": UInt24, "uint32": ULong, "char64": Char64, "Flags32": Flags32, "VarIndex": VarIndex, "Version": Version, "Tag": Tag, "GlyphID": GlyphID, "GlyphID32": GlyphID32, "NameID": NameID, "DeciPoints": DeciPoints, "Fixed": Fixed, "F2Dot14": F2Dot14, "Angle": Angle, "BiasedAngle": BiasedAngle, "struct": Struct, "Offset": Table, "LOffset": LTable, "Offset24": Table24, "ValueRecord": ValueRecord, "DeltaValue": DeltaValue, "VarIdxMapValue": VarIdxMapValue, "VarDataValue": VarDataValue, "LookupFlag": LookupFlag, "ExtendMode": ExtendMode, "CompositeMode": CompositeMode, "STATFlags": STATFlags, # AAT "CIDGlyphMap": CIDGlyphMap, "GlyphCIDMap": GlyphCIDMap, "MortChain": StructWithLength, "MortSubtable": StructWithLength, "MorxChain": StructWithLength, "MorxSubtable": MorxSubtableConverter, # "Template" types "AATLookup": lambda C: partial(AATLookup, tableClass=C), "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C), "STXHeader": lambda C: partial(STXHeader, tableClass=C), "OffsetTo": lambda C: partial(Table, tableClass=C), "LOffsetTo": lambda C: partial(LTable, tableClass=C), "LOffset24To": lambda C: partial(Table24, tableClass=C), } PKaZZZ"�O�O� fontTools/ttLib/tables/otData.pyotData = [ # # common # ("LookupOrder", []), ( "ScriptList", [ ("uint16", "ScriptCount", None, None, "Number of ScriptRecords"), ( "struct", "ScriptRecord", "ScriptCount", 0, "Array of ScriptRecords -listed alphabetically by ScriptTag", ), ], ), ( "ScriptRecord", [ ("Tag", "ScriptTag", None, None, "4-byte ScriptTag identifier"), ( "Offset", "Script", None, None, "Offset to Script table-from beginning of ScriptList", ), ], ), ( "Script", [ ( "Offset", "DefaultLangSys", None, None, "Offset to DefaultLangSys table-from beginning of Script table-may be NULL", ), ( "uint16", "LangSysCount", None, None, "Number of LangSysRecords for this script-excluding the DefaultLangSys", ), ( "struct", "LangSysRecord", "LangSysCount", 0, "Array of LangSysRecords-listed alphabetically by LangSysTag", ), ], ), ( "LangSysRecord", [ ("Tag", "LangSysTag", None, None, "4-byte LangSysTag identifier"), ( "Offset", "LangSys", None, None, "Offset to LangSys table-from beginning of Script table", ), ], ), ( "LangSys", [ ( "Offset", "LookupOrder", None, None, "= NULL (reserved for an offset to a reordering table)", ), ( "uint16", "ReqFeatureIndex", None, None, "Index of a feature required for this language system- if no required features = 0xFFFF", ), ( "uint16", "FeatureCount", None, None, "Number of FeatureIndex values for this language system-excludes the required feature", ), ( "uint16", "FeatureIndex", "FeatureCount", 0, "Array of indices into the FeatureList-in arbitrary order", ), 
], ), ( "FeatureList", [ ( "uint16", "FeatureCount", None, None, "Number of FeatureRecords in this table", ), ( "struct", "FeatureRecord", "FeatureCount", 0, "Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag", ), ], ), ( "FeatureRecord", [ ("Tag", "FeatureTag", None, None, "4-byte feature identification tag"), ( "Offset", "Feature", None, None, "Offset to Feature table-from beginning of FeatureList", ), ], ), ( "Feature", [ ( "Offset", "FeatureParams", None, None, "= NULL (reserved for offset to FeatureParams)", ), ( "uint16", "LookupCount", None, None, "Number of LookupList indices for this feature", ), ( "uint16", "LookupListIndex", "LookupCount", 0, "Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)", ), ], ), ("FeatureParams", []), ( "FeatureParamsSize", [ ( "DeciPoints", "DesignSize", None, None, "The design size in 720/inch units (decipoints).", ), ( "uint16", "SubfamilyID", None, None, "Serves as an identifier that associates fonts in a subfamily.", ), ("NameID", "SubfamilyNameID", None, None, "Subfamily NameID."), ( "DeciPoints", "RangeStart", None, None, "Small end of recommended usage range (exclusive) in 720/inch units.", ), ( "DeciPoints", "RangeEnd", None, None, "Large end of recommended usage range (inclusive) in 720/inch units.", ), ], ), ( "FeatureParamsStylisticSet", [ ("uint16", "Version", None, None, "Set to 0."), ("NameID", "UINameID", None, None, "UI NameID."), ], ), ( "FeatureParamsCharacterVariants", [ ("uint16", "Format", None, None, "Set to 0."), ("NameID", "FeatUILabelNameID", None, None, "Feature UI label NameID."), ( "NameID", "FeatUITooltipTextNameID", None, None, "Feature UI tooltip text NameID.", ), ("NameID", "SampleTextNameID", None, None, "Sample text NameID."), ("uint16", "NumNamedParameters", None, None, "Number of named parameters."), ( "NameID", "FirstParamUILabelNameID", None, None, "First NameID of UI feature parameters.", ), ( "uint16", "CharCount", None, None, "Count of characters this feature provides glyph variants for.", ), ( "uint24", "Character", "CharCount", 0, "Unicode characters for which this feature provides glyph variants.", ), ], ), ( "LookupList", [ ("uint16", "LookupCount", None, None, "Number of lookups in this table"), ( "Offset", "Lookup", "LookupCount", 0, "Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)", ), ], ), ( "Lookup", [ ( "uint16", "LookupType", None, None, "Different enumerations for GSUB and GPOS", ), ("LookupFlag", "LookupFlag", None, None, "Lookup qualifiers"), ( "uint16", "SubTableCount", None, None, "Number of SubTables for this lookup", ), ( "Offset", "SubTable", "SubTableCount", 0, "Array of offsets to SubTables-from beginning of Lookup table", ), ( "uint16", "MarkFilteringSet", None, "LookupFlag & 0x0010", "If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. 
The layout engine skips over all mark glyphs not in the mark filtering set indicated.", ), ], ), ( "CoverageFormat1", [ ("uint16", "CoverageFormat", None, None, "Format identifier-format = 1"), ("uint16", "GlyphCount", None, None, "Number of glyphs in the GlyphArray"), ( "GlyphID", "GlyphArray", "GlyphCount", 0, "Array of GlyphIDs-in numerical order", ), ], ), ( "CoverageFormat2", [ ("uint16", "CoverageFormat", None, None, "Format identifier-format = 2"), ("uint16", "RangeCount", None, None, "Number of RangeRecords"), ( "struct", "RangeRecord", "RangeCount", 0, "Array of glyph ranges-ordered by Start GlyphID", ), ], ), ( "RangeRecord", [ ("GlyphID", "Start", None, None, "First GlyphID in the range"), ("GlyphID", "End", None, None, "Last GlyphID in the range"), ( "uint16", "StartCoverageIndex", None, None, "Coverage Index of first GlyphID in range", ), ], ), ( "ClassDefFormat1", [ ("uint16", "ClassFormat", None, None, "Format identifier-format = 1"), ( "GlyphID", "StartGlyph", None, None, "First GlyphID of the ClassValueArray", ), ("uint16", "GlyphCount", None, None, "Size of the ClassValueArray"), ( "uint16", "ClassValueArray", "GlyphCount", 0, "Array of Class Values-one per GlyphID", ), ], ), ( "ClassDefFormat2", [ ("uint16", "ClassFormat", None, None, "Format identifier-format = 2"), ("uint16", "ClassRangeCount", None, None, "Number of ClassRangeRecords"), ( "struct", "ClassRangeRecord", "ClassRangeCount", 0, "Array of ClassRangeRecords-ordered by Start GlyphID", ), ], ), ( "ClassRangeRecord", [ ("GlyphID", "Start", None, None, "First GlyphID in the range"), ("GlyphID", "End", None, None, "Last GlyphID in the range"), ("uint16", "Class", None, None, "Applied to all glyphs in the range"), ], ), ( "Device", [ ("uint16", "StartSize", None, None, "Smallest size to correct-in ppem"), ("uint16", "EndSize", None, None, "Largest size to correct-in ppem"), ( "uint16", "DeltaFormat", None, None, "Format of DeltaValue array data: 1, 2, or 3", ), ( "DeltaValue", "DeltaValue", "", "DeltaFormat in (1,2,3)", "Array of compressed data", ), ], ), # # gpos # ( "GPOS", [ ( "Version", "Version", None, None, "Version of the GPOS table- 0x00010000 or 0x00010001", ), ( "Offset", "ScriptList", None, None, "Offset to ScriptList table-from beginning of GPOS table", ), ( "Offset", "FeatureList", None, None, "Offset to FeatureList table-from beginning of GPOS table", ), ( "Offset", "LookupList", None, None, "Offset to LookupList table-from beginning of GPOS table", ), ( "LOffset", "FeatureVariations", None, "Version >= 0x00010001", "Offset to FeatureVariations table-from beginning of GPOS table", ), ], ), ( "SinglePosFormat1", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of SinglePos subtable", ), ( "uint16", "ValueFormat", None, None, "Defines the types of data in the ValueRecord", ), ( "ValueRecord", "Value", None, None, "Defines positioning value(s)-applied to all glyphs in the Coverage table", ), ], ), ( "SinglePosFormat2", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of SinglePos subtable", ), ( "uint16", "ValueFormat", None, None, "Defines the types of data in the ValueRecord", ), ("uint16", "ValueCount", None, None, "Number of ValueRecords"), ( "ValueRecord", "Value", "ValueCount", 0, "Array of ValueRecords-positioning values applied to glyphs", ), ], ), ( "PairPosFormat1", [ ("uint16", "PosFormat", None, 
None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair", ), ( "uint16", "ValueFormat1", None, None, "Defines the types of data in ValueRecord1-for the first glyph in the pair -may be zero (0)", ), ( "uint16", "ValueFormat2", None, None, "Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)", ), ("uint16", "PairSetCount", None, None, "Number of PairSet tables"), ( "Offset", "PairSet", "PairSetCount", 0, "Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index", ), ], ), ( "PairSet", [ ("uint16", "PairValueCount", None, None, "Number of PairValueRecords"), ( "struct", "PairValueRecord", "PairValueCount", 0, "Array of PairValueRecords-ordered by GlyphID of the second glyph", ), ], ), ( "PairValueRecord", [ ( "GlyphID", "SecondGlyph", None, None, "GlyphID of second glyph in the pair-first glyph is listed in the Coverage table", ), ( "ValueRecord", "Value1", None, None, "Positioning data for the first glyph in the pair", ), ( "ValueRecord", "Value2", None, None, "Positioning data for the second glyph in the pair", ), ], ), ( "PairPosFormat2", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair", ), ( "uint16", "ValueFormat1", None, None, "ValueRecord definition-for the first glyph of the pair-may be zero (0)", ), ( "uint16", "ValueFormat2", None, None, "ValueRecord definition-for the second glyph of the pair-may be zero (0)", ), ( "Offset", "ClassDef1", None, None, "Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair", ), ( "Offset", "ClassDef2", None, None, "Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair", ), ( "uint16", "Class1Count", None, None, "Number of classes in ClassDef1 table-includes Class0", ), ( "uint16", "Class2Count", None, None, "Number of classes in ClassDef2 table-includes Class0", ), ( "struct", "Class1Record", "Class1Count", 0, "Array of Class1 records-ordered by Class1", ), ], ), ( "Class1Record", [ ( "struct", "Class2Record", "Class2Count", 0, "Array of Class2 records-ordered by Class2", ), ], ), ( "Class2Record", [ ( "ValueRecord", "Value1", None, None, "Positioning for first glyph-empty if ValueFormat1 = 0", ), ( "ValueRecord", "Value2", None, None, "Positioning for second glyph-empty if ValueFormat2 = 0", ), ], ), ( "CursivePosFormat1", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of CursivePos subtable", ), ("uint16", "EntryExitCount", None, None, "Number of EntryExit records"), ( "struct", "EntryExitRecord", "EntryExitCount", 0, "Array of EntryExit records-in Coverage Index order", ), ], ), ( "EntryExitRecord", [ ( "Offset", "EntryAnchor", None, None, "Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL", ), ( "Offset", "ExitAnchor", None, None, "Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL", ), ], ), ( "MarkBasePosFormat1", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), ( "Offset", "MarkCoverage", None, None, "Offset to MarkCoverage table-from beginning of MarkBasePos subtable", ), ( "Offset", "BaseCoverage", None, None, "Offset to BaseCoverage table-from beginning 
of MarkBasePos subtable", ), ("uint16", "ClassCount", None, None, "Number of classes defined for marks"), ( "Offset", "MarkArray", None, None, "Offset to MarkArray table-from beginning of MarkBasePos subtable", ), ( "Offset", "BaseArray", None, None, "Offset to BaseArray table-from beginning of MarkBasePos subtable", ), ], ), ( "BaseArray", [ ("uint16", "BaseCount", None, None, "Number of BaseRecords"), ( "struct", "BaseRecord", "BaseCount", 0, "Array of BaseRecords-in order of BaseCoverage Index", ), ], ), ( "BaseRecord", [ ( "Offset", "BaseAnchor", "ClassCount", 0, "Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by class-zero-based", ), ], ), ( "MarkLigPosFormat1", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), ( "Offset", "MarkCoverage", None, None, "Offset to Mark Coverage table-from beginning of MarkLigPos subtable", ), ( "Offset", "LigatureCoverage", None, None, "Offset to Ligature Coverage table-from beginning of MarkLigPos subtable", ), ("uint16", "ClassCount", None, None, "Number of defined mark classes"), ( "Offset", "MarkArray", None, None, "Offset to MarkArray table-from beginning of MarkLigPos subtable", ), ( "Offset", "LigatureArray", None, None, "Offset to LigatureArray table-from beginning of MarkLigPos subtable", ), ], ), ( "LigatureArray", [ ( "uint16", "LigatureCount", None, None, "Number of LigatureAttach table offsets", ), ( "Offset", "LigatureAttach", "LigatureCount", 0, "Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index", ), ], ), ( "LigatureAttach", [ ( "uint16", "ComponentCount", None, None, "Number of ComponentRecords in this ligature", ), ( "struct", "ComponentRecord", "ComponentCount", 0, "Array of Component records-ordered in writing direction", ), ], ), ( "ComponentRecord", [ ( "Offset", "LigatureAnchor", "ClassCount", 0, "Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array", ), ], ), ( "MarkMarkPosFormat1", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Mark1Coverage", None, None, "Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable", ), ( "Offset", "Mark2Coverage", None, None, "Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable", ), ( "uint16", "ClassCount", None, None, "Number of Combining Mark classes defined", ), ( "Offset", "Mark1Array", None, None, "Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable", ), ( "Offset", "Mark2Array", None, None, "Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable", ), ], ), ( "Mark2Array", [ ("uint16", "Mark2Count", None, None, "Number of Mark2 records"), ( "struct", "Mark2Record", "Mark2Count", 0, "Array of Mark2 records-in Coverage order", ), ], ), ( "Mark2Record", [ ( "Offset", "Mark2Anchor", "ClassCount", 0, "Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array", ), ], ), ( "PosLookupRecord", [ ( "uint16", "SequenceIndex", None, None, "Index to input glyph sequence-first glyph = 0", ), ( "uint16", "LookupListIndex", None, None, "Lookup to apply to that position-zero-based", ), ], ), ( "ContextPosFormat1", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of ContextPos subtable", ), 
("uint16", "PosRuleSetCount", None, None, "Number of PosRuleSet tables"), ( "Offset", "PosRuleSet", "PosRuleSetCount", 0, "Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index", ), ], ), ( "PosRuleSet", [ ("uint16", "PosRuleCount", None, None, "Number of PosRule tables"), ( "Offset", "PosRule", "PosRuleCount", 0, "Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference", ), ], ), ( "PosRule", [ ( "uint16", "GlyphCount", None, None, "Number of glyphs in the Input glyph sequence", ), ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), ( "GlyphID", "Input", "GlyphCount", -1, "Array of input GlyphIDs-starting with the second glyph", ), ( "struct", "PosLookupRecord", "PosCount", 0, "Array of positioning lookups-in design order", ), ], ), ( "ContextPosFormat2", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of ContextPos subtable", ), ( "Offset", "ClassDef", None, None, "Offset to ClassDef table-from beginning of ContextPos subtable", ), ("uint16", "PosClassSetCount", None, None, "Number of PosClassSet tables"), ( "Offset", "PosClassSet", "PosClassSetCount", 0, "Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL", ), ], ), ( "PosClassSet", [ ( "uint16", "PosClassRuleCount", None, None, "Number of PosClassRule tables", ), ( "Offset", "PosClassRule", "PosClassRuleCount", 0, "Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference", ), ], ), ( "PosClassRule", [ ("uint16", "GlyphCount", None, None, "Number of glyphs to be matched"), ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), ( "uint16", "Class", "GlyphCount", -1, "Array of classes-beginning with the second class-to be matched to the input glyph sequence", ), ( "struct", "PosLookupRecord", "PosCount", 0, "Array of positioning lookups-in design order", ), ], ), ( "ContextPosFormat3", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 3"), ( "uint16", "GlyphCount", None, None, "Number of glyphs in the input sequence", ), ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), ( "Offset", "Coverage", "GlyphCount", 0, "Array of offsets to Coverage tables-from beginning of ContextPos subtable", ), ( "struct", "PosLookupRecord", "PosCount", 0, "Array of positioning lookups-in design order", ), ], ), ( "ChainContextPosFormat1", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of ContextPos subtable", ), ( "uint16", "ChainPosRuleSetCount", None, None, "Number of ChainPosRuleSet tables", ), ( "Offset", "ChainPosRuleSet", "ChainPosRuleSetCount", 0, "Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index", ), ], ), ( "ChainPosRuleSet", [ ( "uint16", "ChainPosRuleCount", None, None, "Number of ChainPosRule tables", ), ( "Offset", "ChainPosRule", "ChainPosRuleCount", 0, "Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference", ), ], ), ( "ChainPosRule", [ ( "uint16", "BacktrackGlyphCount", None, None, "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)", ), ( "GlyphID", "Backtrack", "BacktrackGlyphCount", 0, "Array of backtracking GlyphID's (to be matched before the input sequence)", ), ( 
"uint16", "InputGlyphCount", None, None, "Total number of glyphs in the input sequence (includes the first glyph)", ), ( "GlyphID", "Input", "InputGlyphCount", -1, "Array of input GlyphIDs (start with second glyph)", ), ( "uint16", "LookAheadGlyphCount", None, None, "Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)", ), ( "GlyphID", "LookAhead", "LookAheadGlyphCount", 0, "Array of lookahead GlyphID's (to be matched after the input sequence)", ), ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), ( "struct", "PosLookupRecord", "PosCount", 0, "Array of PosLookupRecords (in design order)", ), ], ), ( "ChainContextPosFormat2", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 2"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of ChainContextPos subtable", ), ( "Offset", "BacktrackClassDef", None, None, "Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable", ), ( "Offset", "InputClassDef", None, None, "Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable", ), ( "Offset", "LookAheadClassDef", None, None, "Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable", ), ( "uint16", "ChainPosClassSetCount", None, None, "Number of ChainPosClassSet tables", ), ( "Offset", "ChainPosClassSet", "ChainPosClassSetCount", 0, "Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL", ), ], ), ( "ChainPosClassSet", [ ( "uint16", "ChainPosClassRuleCount", None, None, "Number of ChainPosClassRule tables", ), ( "Offset", "ChainPosClassRule", "ChainPosClassRuleCount", 0, "Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference", ), ], ), ( "ChainPosClassRule", [ ( "uint16", "BacktrackGlyphCount", None, None, "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)", ), ( "uint16", "Backtrack", "BacktrackGlyphCount", 0, "Array of backtracking classes(to be matched before the input sequence)", ), ( "uint16", "InputGlyphCount", None, None, "Total number of classes in the input sequence (includes the first class)", ), ( "uint16", "Input", "InputGlyphCount", -1, "Array of input classes(start with second class; to be matched with the input glyph sequence)", ), ( "uint16", "LookAheadGlyphCount", None, None, "Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)", ), ( "uint16", "LookAhead", "LookAheadGlyphCount", 0, "Array of lookahead classes(to be matched after the input sequence)", ), ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), ( "struct", "PosLookupRecord", "PosCount", 0, "Array of PosLookupRecords (in design order)", ), ], ), ( "ChainContextPosFormat3", [ ("uint16", "PosFormat", None, None, "Format identifier-format = 3"), ( "uint16", "BacktrackGlyphCount", None, None, "Number of glyphs in the backtracking sequence", ), ( "Offset", "BacktrackCoverage", "BacktrackGlyphCount", 0, "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order", ), ( "uint16", "InputGlyphCount", None, None, "Number of glyphs in input sequence", ), ( "Offset", "InputCoverage", "InputGlyphCount", 0, "Array of offsets to coverage tables in input sequence, in glyph sequence order", ), ( "uint16", 
"LookAheadGlyphCount", None, None, "Number of glyphs in lookahead sequence", ), ( "Offset", "LookAheadCoverage", "LookAheadGlyphCount", 0, "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order", ), ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), ( "struct", "PosLookupRecord", "PosCount", 0, "Array of PosLookupRecords,in design order", ), ], ), ( "ExtensionPosFormat1", [ ("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."), ( "uint16", "ExtensionLookupType", None, None, "Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).", ), ("LOffset", "ExtSubTable", None, None, "Offset to SubTable"), ], ), # ('ValueRecord', [ # ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'), # ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'), # ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'), # ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'), # ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'), # ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'), # ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'), # ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'), # ]), ( "AnchorFormat1", [ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 1"), ("int16", "XCoordinate", None, None, "Horizontal value-in design units"), ("int16", "YCoordinate", None, None, "Vertical value-in design units"), ], ), ( "AnchorFormat2", [ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 2"), ("int16", "XCoordinate", None, None, "Horizontal value-in design units"), ("int16", "YCoordinate", None, None, "Vertical value-in design units"), ("uint16", "AnchorPoint", None, None, "Index to glyph contour point"), ], ), ( "AnchorFormat3", [ ("uint16", "AnchorFormat", None, None, "Format identifier-format = 3"), ("int16", "XCoordinate", None, None, "Horizontal value-in design units"), ("int16", "YCoordinate", None, None, "Vertical value-in design units"), ( "Offset", "XDeviceTable", None, None, "Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)", ), ( "Offset", "YDeviceTable", None, None, "Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)", ), ], ), ( "MarkArray", [ ("uint16", "MarkCount", None, None, "Number of MarkRecords"), ( "struct", "MarkRecord", "MarkCount", 0, "Array of MarkRecords-in Coverage order", ), ], ), ( "MarkRecord", [ ("uint16", "Class", None, None, "Class defined for this mark"), ( "Offset", "MarkAnchor", None, None, "Offset to Anchor table-from beginning of MarkArray table", ), ], ), # # gsub # ( "GSUB", [ ( "Version", "Version", None, None, "Version of the GSUB table- 0x00010000 or 0x00010001", ), ( "Offset", "ScriptList", None, None, "Offset to ScriptList table-from beginning of GSUB table", ), ( "Offset", "FeatureList", None, None, "Offset to FeatureList table-from beginning of GSUB table", ), ( "Offset", "LookupList", None, None, "Offset to LookupList table-from beginning of GSUB 
table", ), ( "LOffset", "FeatureVariations", None, "Version >= 0x00010001", "Offset to FeatureVariations table-from beginning of GSUB table", ), ], ), ( "SingleSubstFormat1", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of Substitution table", ), ( "uint16", "DeltaGlyphID", None, None, "Add to original GlyphID modulo 65536 to get substitute GlyphID", ), ], ), ( "SingleSubstFormat2", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of Substitution table", ), ( "uint16", "GlyphCount", None, None, "Number of GlyphIDs in the Substitute array", ), ( "GlyphID", "Substitute", "GlyphCount", 0, "Array of substitute GlyphIDs-ordered by Coverage Index", ), ], ), ( "MultipleSubstFormat1", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of Substitution table", ), ( "uint16", "SequenceCount", None, None, "Number of Sequence table offsets in the Sequence array", ), ( "Offset", "Sequence", "SequenceCount", 0, "Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index", ), ], ), ( "Sequence", [ ( "uint16", "GlyphCount", None, None, "Number of GlyphIDs in the Substitute array. This should always be greater than 0.", ), ( "GlyphID", "Substitute", "GlyphCount", 0, "String of GlyphIDs to substitute", ), ], ), ( "AlternateSubstFormat1", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of Substitution table", ), ( "uint16", "AlternateSetCount", None, None, "Number of AlternateSet tables", ), ( "Offset", "AlternateSet", "AlternateSetCount", 0, "Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index", ), ], ), ( "AlternateSet", [ ( "uint16", "GlyphCount", None, None, "Number of GlyphIDs in the Alternate array", ), ( "GlyphID", "Alternate", "GlyphCount", 0, "Array of alternate GlyphIDs-in arbitrary order", ), ], ), ( "LigatureSubstFormat1", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of Substitution table", ), ("uint16", "LigSetCount", None, None, "Number of LigatureSet tables"), ( "Offset", "LigatureSet", "LigSetCount", 0, "Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index", ), ], ), ( "LigatureSet", [ ("uint16", "LigatureCount", None, None, "Number of Ligature tables"), ( "Offset", "Ligature", "LigatureCount", 0, "Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference", ), ], ), ( "Ligature", [ ("GlyphID", "LigGlyph", None, None, "GlyphID of ligature to substitute"), ("uint16", "CompCount", None, None, "Number of components in the ligature"), ( "GlyphID", "Component", "CompCount", -1, "Array of component GlyphIDs-start with the second component-ordered in writing direction", ), ], ), ( "SubstLookupRecord", [ ( "uint16", "SequenceIndex", None, None, "Index into current glyph sequence-first glyph = 0", ), ( "uint16", "LookupListIndex", None, None, "Lookup to apply to that position-zero-based", ), ], ), ( "ContextSubstFormat1", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, 
"Offset to Coverage table-from beginning of Substitution table", ), ( "uint16", "SubRuleSetCount", None, None, "Number of SubRuleSet tables-must equal GlyphCount in Coverage table", ), ( "Offset", "SubRuleSet", "SubRuleSetCount", 0, "Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index", ), ], ), ( "SubRuleSet", [ ("uint16", "SubRuleCount", None, None, "Number of SubRule tables"), ( "Offset", "SubRule", "SubRuleCount", 0, "Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference", ), ], ), ( "SubRule", [ ( "uint16", "GlyphCount", None, None, "Total number of glyphs in input glyph sequence-includes the first glyph", ), ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), ( "GlyphID", "Input", "GlyphCount", -1, "Array of input GlyphIDs-start with second glyph", ), ( "struct", "SubstLookupRecord", "SubstCount", 0, "Array of SubstLookupRecords-in design order", ), ], ), ( "ContextSubstFormat2", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of Substitution table", ), ( "Offset", "ClassDef", None, None, "Offset to glyph ClassDef table-from beginning of Substitution table", ), ("uint16", "SubClassSetCount", None, None, "Number of SubClassSet tables"), ( "Offset", "SubClassSet", "SubClassSetCount", 0, "Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL", ), ], ), ( "SubClassSet", [ ( "uint16", "SubClassRuleCount", None, None, "Number of SubClassRule tables", ), ( "Offset", "SubClassRule", "SubClassRuleCount", 0, "Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference", ), ], ), ( "SubClassRule", [ ( "uint16", "GlyphCount", None, None, "Total number of classes specified for the context in the rule-includes the first class", ), ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), ( "uint16", "Class", "GlyphCount", -1, "Array of classes-beginning with the second class-to be matched to the input glyph class sequence", ), ( "struct", "SubstLookupRecord", "SubstCount", 0, "Array of Substitution lookups-in design order", ), ], ), ( "ContextSubstFormat3", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 3"), ( "uint16", "GlyphCount", None, None, "Number of glyphs in the input glyph sequence", ), ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), ( "Offset", "Coverage", "GlyphCount", 0, "Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order", ), ( "struct", "SubstLookupRecord", "SubstCount", 0, "Array of SubstLookupRecords-in design order", ), ], ), ( "ChainContextSubstFormat1", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of Substitution table", ), ( "uint16", "ChainSubRuleSetCount", None, None, "Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table", ), ( "Offset", "ChainSubRuleSet", "ChainSubRuleSetCount", 0, "Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index", ), ], ), ( "ChainSubRuleSet", [ ( "uint16", "ChainSubRuleCount", None, None, "Number of ChainSubRule tables", ), ( "Offset", "ChainSubRule", "ChainSubRuleCount", 0, "Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference", ), ], ), ( 
"ChainSubRule", [ ( "uint16", "BacktrackGlyphCount", None, None, "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)", ), ( "GlyphID", "Backtrack", "BacktrackGlyphCount", 0, "Array of backtracking GlyphID's (to be matched before the input sequence)", ), ( "uint16", "InputGlyphCount", None, None, "Total number of glyphs in the input sequence (includes the first glyph)", ), ( "GlyphID", "Input", "InputGlyphCount", -1, "Array of input GlyphIDs (start with second glyph)", ), ( "uint16", "LookAheadGlyphCount", None, None, "Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)", ), ( "GlyphID", "LookAhead", "LookAheadGlyphCount", 0, "Array of lookahead GlyphID's (to be matched after the input sequence)", ), ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), ( "struct", "SubstLookupRecord", "SubstCount", 0, "Array of SubstLookupRecords (in design order)", ), ], ), ( "ChainContextSubstFormat2", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"), ( "Offset", "Coverage", None, None, "Offset to Coverage table-from beginning of Substitution table", ), ( "Offset", "BacktrackClassDef", None, None, "Offset to glyph ClassDef table containing backtrack sequence data-from beginning of Substitution table", ), ( "Offset", "InputClassDef", None, None, "Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table", ), ( "Offset", "LookAheadClassDef", None, None, "Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table", ), ( "uint16", "ChainSubClassSetCount", None, None, "Number of ChainSubClassSet tables", ), ( "Offset", "ChainSubClassSet", "ChainSubClassSetCount", 0, "Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL", ), ], ), ( "ChainSubClassSet", [ ( "uint16", "ChainSubClassRuleCount", None, None, "Number of ChainSubClassRule tables", ), ( "Offset", "ChainSubClassRule", "ChainSubClassRuleCount", 0, "Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference", ), ], ), ( "ChainSubClassRule", [ ( "uint16", "BacktrackGlyphCount", None, None, "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)", ), ( "uint16", "Backtrack", "BacktrackGlyphCount", 0, "Array of backtracking classes(to be matched before the input sequence)", ), ( "uint16", "InputGlyphCount", None, None, "Total number of classes in the input sequence (includes the first class)", ), ( "uint16", "Input", "InputGlyphCount", -1, "Array of input classes(start with second class; to be matched with the input glyph sequence)", ), ( "uint16", "LookAheadGlyphCount", None, None, "Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)", ), ( "uint16", "LookAhead", "LookAheadGlyphCount", 0, "Array of lookahead classes(to be matched after the input sequence)", ), ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), ( "struct", "SubstLookupRecord", "SubstCount", 0, "Array of SubstLookupRecords (in design order)", ), ], ), ( "ChainContextSubstFormat3", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 3"), ( "uint16", "BacktrackGlyphCount", None, None, "Number of glyphs in the backtracking sequence", ), ( "Offset", "BacktrackCoverage", "BacktrackGlyphCount", 0, "Array of offsets to 
coverage tables in backtracking sequence, in glyph sequence order", ), ( "uint16", "InputGlyphCount", None, None, "Number of glyphs in input sequence", ), ( "Offset", "InputCoverage", "InputGlyphCount", 0, "Array of offsets to coverage tables in input sequence, in glyph sequence order", ), ( "uint16", "LookAheadGlyphCount", None, None, "Number of glyphs in lookahead sequence", ), ( "Offset", "LookAheadCoverage", "LookAheadGlyphCount", 0, "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order", ), ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), ( "struct", "SubstLookupRecord", "SubstCount", 0, "Array of SubstLookupRecords, in design order", ), ], ), ( "ExtensionSubstFormat1", [ ("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."), ( "uint16", "ExtensionLookupType", None, None, "Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).", ), ("LOffset", "ExtSubTable", None, None, "Offset to SubTable"), ], ), ( "ReverseChainSingleSubstFormat1", [ ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), ( "Offset", "Coverage", None, 0, "Offset to Coverage table - from beginning of Substitution table", ), ( "uint16", "BacktrackGlyphCount", None, None, "Number of glyphs in the backtracking sequence", ), ( "Offset", "BacktrackCoverage", "BacktrackGlyphCount", 0, "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order", ), ( "uint16", "LookAheadGlyphCount", None, None, "Number of glyphs in lookahead sequence", ), ( "Offset", "LookAheadCoverage", "LookAheadGlyphCount", 0, "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order", ), ( "uint16", "GlyphCount", None, None, "Number of GlyphIDs in the Substitute array", ), ( "GlyphID", "Substitute", "GlyphCount", 0, "Array of substitute GlyphIDs-ordered by Coverage index", ), ], ), # # gdef # ( "GDEF", [ ( "Version", "Version", None, None, "Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003", ), ( "Offset", "GlyphClassDef", None, None, "Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)", ), ( "Offset", "AttachList", None, None, "Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)", ), ( "Offset", "LigCaretList", None, None, "Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)", ), ( "Offset", "MarkAttachClassDef", None, None, "Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)", ), ( "Offset", "MarkGlyphSetsDef", None, "Version >= 0x00010002", "Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)", ), ( "LOffset", "VarStore", None, "Version >= 0x00010003", "Offset to variation store (may be NULL)", ), ], ), ( "AttachList", [ ( "Offset", "Coverage", None, None, "Offset to Coverage table - from beginning of AttachList table", ), ( "uint16", "GlyphCount", None, None, "Number of glyphs with attachment points", ), ( "Offset", "AttachPoint", "GlyphCount", 0, "Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order", ), ], ), ( "AttachPoint", [ ( "uint16", "PointCount", None, None, "Number of attachment points on this glyph", ), ( "uint16", "PointIndex", "PointCount", 0, "Array of contour point indices-in increasing numerical order", ),
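# In the GDEF header above, the aux column holds a version predicate instead
# of a repeat adjustment: fields such as MarkGlyphSetsDef ("Version >=
# 0x00010002") and VarStore ("Version >= 0x00010003") are only read or
# written when the predicate is true for the table instance at hand, which
# is how a single entry can describe GDEF versions 1.0, 1.2 and 1.3.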
], ), ( "LigCaretList", [ ( "Offset", "Coverage", None, None, "Offset to Coverage table - from beginning of LigCaretList table", ), ("uint16", "LigGlyphCount", None, None, "Number of ligature glyphs"), ( "Offset", "LigGlyph", "LigGlyphCount", 0, "Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order", ), ], ), ( "LigGlyph", [ ( "uint16", "CaretCount", None, None, "Number of CaretValues for this ligature (components - 1)", ), ( "Offset", "CaretValue", "CaretCount", 0, "Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order", ), ], ), ( "CaretValueFormat1", [ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 1"), ("int16", "Coordinate", None, None, "X or Y value, in design units"), ], ), ( "CaretValueFormat2", [ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 2"), ("uint16", "CaretValuePoint", None, None, "Contour point index on glyph"), ], ), ( "CaretValueFormat3", [ ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 3"), ("int16", "Coordinate", None, None, "X or Y value, in design units"), ( "Offset", "DeviceTable", None, None, "Offset to Device table for X or Y value-from beginning of CaretValue table", ), ], ), ( "MarkGlyphSetsDef", [ ("uint16", "MarkSetTableFormat", None, None, "Format identifier == 1"), ("uint16", "MarkSetCount", None, None, "Number of mark sets defined"), ( "LOffset", "Coverage", "MarkSetCount", 0, "Array of offsets to mark set coverage tables.", ), ], ), # # base # ( "BASE", [ ( "Version", "Version", None, None, "Version of the BASE table-initially 0x00010000", ), ( "Offset", "HorizAxis", None, None, "Offset to horizontal Axis table-from beginning of BASE table-may be NULL", ), ( "Offset", "VertAxis", None, None, "Offset to vertical Axis table-from beginning of BASE table-may be NULL", ), ( "LOffset", "VarStore", None, "Version >= 0x00010001", "Offset to variation store (may be NULL)", ), ], ), ( "Axis", [ ( "Offset", "BaseTagList", None, None, "Offset to BaseTagList table-from beginning of Axis table-may be NULL", ), ( "Offset", "BaseScriptList", None, None, "Offset to BaseScriptList table-from beginning of Axis table", ), ], ), ( "BaseTagList", [ ( "uint16", "BaseTagCount", None, None, "Number of baseline identification tags in this text direction-may be zero (0)", ), ( "Tag", "BaselineTag", "BaseTagCount", 0, "Array of 4-byte baseline identification tags-must be in alphabetical order", ), ], ), ( "BaseScriptList", [ ( "uint16", "BaseScriptCount", None, None, "Number of BaseScriptRecords defined", ), ( "struct", "BaseScriptRecord", "BaseScriptCount", 0, "Array of BaseScriptRecords-in alphabetical order by BaseScriptTag", ), ], ), ( "BaseScriptRecord", [ ("Tag", "BaseScriptTag", None, None, "4-byte script identification tag"), ( "Offset", "BaseScript", None, None, "Offset to BaseScript table-from beginning of BaseScriptList", ), ], ), ( "BaseScript", [ ( "Offset", "BaseValues", None, None, "Offset to BaseValues table-from beginning of BaseScript table-may be NULL", ), ( "Offset", "DefaultMinMax", None, None, "Offset to MinMax table- from beginning of BaseScript table-may be NULL", ), ( "uint16", "BaseLangSysCount", None, None, "Number of BaseLangSysRecords defined-may be zero (0)", ), ( "struct", "BaseLangSysRecord", "BaseLangSysCount", 0, "Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag", ), ], ), ( "BaseLangSysRecord", [ ( "Tag", "BaseLangSysTag", None, None, "4-byte language system 
identification tag", ), ( "Offset", "MinMax", None, None, "Offset to MinMax table-from beginning of BaseScript table", ), ], ), ( "BaseValues", [ ( "uint16", "DefaultIndex", None, None, "Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList", ), ( "uint16", "BaseCoordCount", None, None, "Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList", ), ( "Offset", "BaseCoord", "BaseCoordCount", 0, "Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList", ), ], ), ( "MinMax", [ ( "Offset", "MinCoord", None, None, "Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL", ), ( "Offset", "MaxCoord", None, None, "Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL", ), ( "uint16", "FeatMinMaxCount", None, None, "Number of FeatMinMaxRecords-may be zero (0)", ), ( "struct", "FeatMinMaxRecord", "FeatMinMaxCount", 0, "Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag", ), ], ), ( "FeatMinMaxRecord", [ ( "Tag", "FeatureTableTag", None, None, "4-byte feature identification tag-must match FeatureTag in FeatureList", ), ( "Offset", "MinCoord", None, None, "Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL", ), ( "Offset", "MaxCoord", None, None, "Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL", ), ], ), ( "BaseCoordFormat1", [ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 1"), ("int16", "Coordinate", None, None, "X or Y value, in design units"), ], ), ( "BaseCoordFormat2", [ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 2"), ("int16", "Coordinate", None, None, "X or Y value, in design units"), ("GlyphID", "ReferenceGlyph", None, None, "GlyphID of control glyph"), ( "uint16", "BaseCoordPoint", None, None, "Index of contour point on the ReferenceGlyph", ), ], ), ( "BaseCoordFormat3", [ ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 3"), ("int16", "Coordinate", None, None, "X or Y value, in design units"), ( "Offset", "DeviceTable", None, None, "Offset to Device table for X or Y value", ), ], ), # # jstf # ( "JSTF", [ ( "Version", "Version", None, None, "Version of the JSTF table-initially set to 0x00010000", ), ( "uint16", "JstfScriptCount", None, None, "Number of JstfScriptRecords in this table", ), ( "struct", "JstfScriptRecord", "JstfScriptCount", 0, "Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag", ), ], ), ( "JstfScriptRecord", [ ("Tag", "JstfScriptTag", None, None, "4-byte JstfScript identification"), ( "Offset", "JstfScript", None, None, "Offset to JstfScript table-from beginning of JSTF Header", ), ], ), ( "JstfScript", [ ( "Offset", "ExtenderGlyph", None, None, "Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL", ), ( "Offset", "DefJstfLangSys", None, None, "Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL", ), ( "uint16", "JstfLangSysCount", None, None, "Number of JstfLangSysRecords in this table- may be zero (0)", ), ( "struct", "JstfLangSysRecord", "JstfLangSysCount", 0, "Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag", ), ], ), ( "JstfLangSysRecord", [ ("Tag", "JstfLangSysTag", None, None, "4-byte JstfLangSys identifier"), ( "Offset", "JstfLangSys", None, None, 
"Offset to JstfLangSys table-from beginning of JstfScript table", ), ], ), ( "ExtenderGlyph", [ ( "uint16", "GlyphCount", None, None, "Number of Extender Glyphs in this script", ), ( "GlyphID", "ExtenderGlyph", "GlyphCount", 0, "GlyphIDs-in increasing numerical order", ), ], ), ( "JstfLangSys", [ ( "uint16", "JstfPriorityCount", None, None, "Number of JstfPriority tables", ), ( "Offset", "JstfPriority", "JstfPriorityCount", 0, "Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order", ), ], ), ( "JstfPriority", [ ( "Offset", "ShrinkageEnableGSUB", None, None, "Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL", ), ( "Offset", "ShrinkageDisableGSUB", None, None, "Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL", ), ( "Offset", "ShrinkageEnableGPOS", None, None, "Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL", ), ( "Offset", "ShrinkageDisableGPOS", None, None, "Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL", ), ( "Offset", "ShrinkageJstfMax", None, None, "Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL", ), ( "Offset", "ExtensionEnableGSUB", None, None, "Offset to Extension Enable JstfGSUBModList table-may be NULL", ), ( "Offset", "ExtensionDisableGSUB", None, None, "Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL", ), ( "Offset", "ExtensionEnableGPOS", None, None, "Offset to Extension Enable JstfGSUBModList table-may be NULL", ), ( "Offset", "ExtensionDisableGPOS", None, None, "Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL", ), ( "Offset", "ExtensionJstfMax", None, None, "Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL", ), ], ), ( "JstfGSUBModList", [ ( "uint16", "LookupCount", None, None, "Number of lookups for this modification", ), ( "uint16", "GSUBLookupIndex", "LookupCount", 0, "Array of LookupIndex identifiers in GSUB-in increasing numerical order", ), ], ), ( "JstfGPOSModList", [ ( "uint16", "LookupCount", None, None, "Number of lookups for this modification", ), ( "uint16", "GPOSLookupIndex", "LookupCount", 0, "Array of LookupIndex identifiers in GPOS-in increasing numerical order", ), ], ), ( "JstfMax", [ ( "uint16", "LookupCount", None, None, "Number of lookup Indices for this modification", ), ( "Offset", "Lookup", "LookupCount", 0, "Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order", ), ], ), # # STAT # ( "STAT", [ ( "Version", "Version", None, None, "Version of the table-initially set to 0x00010000, currently 0x00010002.", ), ( "uint16", "DesignAxisRecordSize", None, None, "Size in bytes of each design axis record", ), ("uint16", "DesignAxisCount", None, None, "Number of design axis records"), ( "LOffsetTo(AxisRecordArray)", "DesignAxisRecord", None, None, "Offset in bytes from the beginning of the STAT table to the start of the design axes array", ), ("uint16", "AxisValueCount", None, None, "Number of axis value tables"), ( "LOffsetTo(AxisValueArray)", "AxisValueArray", None, None, "Offset in bytes from the beginning of the STAT table to the start of the axes value offset array", ), ( "NameID", "ElidedFallbackNameID", None, "Version >= 0x00010001", "NameID to use when all style attributes are elided.", ), ], ), ( 
"AxisRecordArray", [ ("AxisRecord", "Axis", "DesignAxisCount", 0, "Axis records"), ], ), ( "AxisRecord", [ ( "Tag", "AxisTag", None, None, "A tag identifying the axis of design variation", ), ( "NameID", "AxisNameID", None, None, 'The name ID for entries in the "name" table that provide a display string for this axis', ), ( "uint16", "AxisOrdering", None, None, "A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names", ), ( "uint8", "MoreBytes", "DesignAxisRecordSize", -8, "Extra bytes. Set to empty array.", ), ], ), ( "AxisValueArray", [ ("Offset", "AxisValue", "AxisValueCount", 0, "Axis values"), ], ), ( "AxisValueFormat1", [ ("uint16", "Format", None, None, "Format, = 1"), ( "uint16", "AxisIndex", None, None, "Index into the axis record array identifying the axis of design variation to which the axis value record applies.", ), ("STATFlags", "Flags", None, None, "Flags."), ("NameID", "ValueNameID", None, None, ""), ("Fixed", "Value", None, None, ""), ], ), ( "AxisValueFormat2", [ ("uint16", "Format", None, None, "Format, = 2"), ( "uint16", "AxisIndex", None, None, "Index into the axis record array identifying the axis of design variation to which the axis value record applies.", ), ("STATFlags", "Flags", None, None, "Flags."), ("NameID", "ValueNameID", None, None, ""), ("Fixed", "NominalValue", None, None, ""), ("Fixed", "RangeMinValue", None, None, ""), ("Fixed", "RangeMaxValue", None, None, ""), ], ), ( "AxisValueFormat3", [ ("uint16", "Format", None, None, "Format, = 3"), ( "uint16", "AxisIndex", None, None, "Index into the axis record array identifying the axis of design variation to which the axis value record applies.", ), ("STATFlags", "Flags", None, None, "Flags."), ("NameID", "ValueNameID", None, None, ""), ("Fixed", "Value", None, None, ""), ("Fixed", "LinkedValue", None, None, ""), ], ), ( "AxisValueFormat4", [ ("uint16", "Format", None, None, "Format, = 4"), ( "uint16", "AxisCount", None, None, "The total number of axes contributing to this axis-values combination.", ), ("STATFlags", "Flags", None, None, "Flags."), ("NameID", "ValueNameID", None, None, ""), ( "struct", "AxisValueRecord", "AxisCount", 0, "Array of AxisValue records that provide the combination of axis values, one for each contributing axis. 
", ), ], ), ( "AxisValueRecord", [ ( "uint16", "AxisIndex", None, None, "Index into the axis record array identifying the axis of design variation to which the axis value record applies.", ), ("Fixed", "Value", None, None, "A numeric value for this attribute value."), ], ), # # Variation fonts # # GSUB/GPOS FeatureVariations ( "FeatureVariations", [ ( "Version", "Version", None, None, "Version of the table-initially set to 0x00010000", ), ( "uint32", "FeatureVariationCount", None, None, "Number of records in the FeatureVariationRecord array", ), ( "struct", "FeatureVariationRecord", "FeatureVariationCount", 0, "Array of FeatureVariationRecord", ), ], ), ( "FeatureVariationRecord", [ ( "LOffset", "ConditionSet", None, None, "Offset to a ConditionSet table, from beginning of the FeatureVariations table.", ), ( "LOffset", "FeatureTableSubstitution", None, None, "Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table", ), ], ), ( "ConditionSet", [ ( "uint16", "ConditionCount", None, None, "Number of condition tables in the ConditionTable array", ), ( "LOffset", "ConditionTable", "ConditionCount", 0, "Array of condition tables.", ), ], ), ( "ConditionTableFormat1", [ ("uint16", "Format", None, None, "Format, = 1"), ( "uint16", "AxisIndex", None, None, "Index for the variation axis within the fvar table, base 0.", ), ( "F2Dot14", "FilterRangeMinValue", None, None, "Minimum normalized axis value of the font variation instances that satisfy this condition.", ), ( "F2Dot14", "FilterRangeMaxValue", None, None, "Maximum value that satisfies this condition.", ), ], ), ( "FeatureTableSubstitution", [ ( "Version", "Version", None, None, "Version of the table-initially set to 0x00010000", ), ( "uint16", "SubstitutionCount", None, None, "Number of records in the FeatureVariationRecords array", ), ( "FeatureTableSubstitutionRecord", "SubstitutionRecord", "SubstitutionCount", 0, "Array of FeatureTableSubstitutionRecord", ), ], ), ( "FeatureTableSubstitutionRecord", [ ("uint16", "FeatureIndex", None, None, "The feature table index to match."), ( "LOffset", "Feature", None, None, "Offset to an alternate feature table, from start of the FeatureTableSubstitution table.", ), ], ), # VariationStore ( "VarRegionAxis", [ ("F2Dot14", "StartCoord", None, None, ""), ("F2Dot14", "PeakCoord", None, None, ""), ("F2Dot14", "EndCoord", None, None, ""), ], ), ( "VarRegion", [ ("struct", "VarRegionAxis", "RegionAxisCount", 0, ""), ], ), ( "VarRegionList", [ ("uint16", "RegionAxisCount", None, None, ""), ("uint16", "RegionCount", None, None, ""), ("VarRegion", "Region", "RegionCount", 0, ""), ], ), ( "VarData", [ ("uint16", "ItemCount", None, None, ""), ("uint16", "NumShorts", None, None, ""), ("uint16", "VarRegionCount", None, None, ""), ("uint16", "VarRegionIndex", "VarRegionCount", 0, ""), ("VarDataValue", "Item", "ItemCount", 0, ""), ], ), ( "VarStore", [ ("uint16", "Format", None, None, "Set to 1."), ("LOffset", "VarRegionList", None, None, ""), ("uint16", "VarDataCount", None, None, ""), ("LOffset", "VarData", "VarDataCount", 0, ""), ], ), # Variation helpers ( "VarIdxMap", [ ("uint16", "EntryFormat", None, None, ""), # Automatically computed ("uint16", "MappingCount", None, None, ""), # Automatically computed ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"), ], ), ( "DeltaSetIndexMapFormat0", [ ("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 0"), ("uint8", "EntryFormat", None, None, ""), # Automatically computed ("uint16", "MappingCount", None, None, 
""), # Automatically computed ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"), ], ), ( "DeltaSetIndexMapFormat1", [ ("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 1"), ("uint8", "EntryFormat", None, None, ""), # Automatically computed ("uint32", "MappingCount", None, None, ""), # Automatically computed ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"), ], ), # Glyph advance variations ( "HVAR", [ ( "Version", "Version", None, None, "Version of the HVAR table-initially = 0x00010000", ), ("LOffset", "VarStore", None, None, ""), ("LOffsetTo(VarIdxMap)", "AdvWidthMap", None, None, ""), ("LOffsetTo(VarIdxMap)", "LsbMap", None, None, ""), ("LOffsetTo(VarIdxMap)", "RsbMap", None, None, ""), ], ), ( "VVAR", [ ( "Version", "Version", None, None, "Version of the VVAR table-initially = 0x00010000", ), ("LOffset", "VarStore", None, None, ""), ("LOffsetTo(VarIdxMap)", "AdvHeightMap", None, None, ""), ("LOffsetTo(VarIdxMap)", "TsbMap", None, None, ""), ("LOffsetTo(VarIdxMap)", "BsbMap", None, None, ""), ("LOffsetTo(VarIdxMap)", "VOrgMap", None, None, "Vertical origin mapping."), ], ), # Font-wide metrics variations ( "MetricsValueRecord", [ ("Tag", "ValueTag", None, None, "4-byte font-wide measure identifier"), ("uint32", "VarIdx", None, None, "Combined outer-inner variation index"), ( "uint8", "MoreBytes", "ValueRecordSize", -8, "Extra bytes. Set to empty array.", ), ], ), ( "MVAR", [ ( "Version", "Version", None, None, "Version of the MVAR table-initially = 0x00010000", ), ("uint16", "Reserved", None, None, "Set to 0"), ("uint16", "ValueRecordSize", None, None, ""), ("uint16", "ValueRecordCount", None, None, ""), ("Offset", "VarStore", None, None, ""), ("MetricsValueRecord", "ValueRecord", "ValueRecordCount", 0, ""), ], ), # # math # ( "MATH", [ ( "Version", "Version", None, None, "Version of the MATH table-initially set to 0x00010000.", ), ( "Offset", "MathConstants", None, None, "Offset to MathConstants table - from the beginning of MATH table.", ), ( "Offset", "MathGlyphInfo", None, None, "Offset to MathGlyphInfo table - from the beginning of MATH table.", ), ( "Offset", "MathVariants", None, None, "Offset to MathVariants table - from the beginning of MATH table.", ), ], ), ( "MathValueRecord", [ ("int16", "Value", None, None, "The X or Y value in design units."), ( "Offset", "DeviceTable", None, None, "Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.", ), ], ), ( "MathConstants", [ ( "int16", "ScriptPercentScaleDown", None, None, "Percentage of scaling down for script level 1. Suggested value: 80%.", ), ( "int16", "ScriptScriptPercentScaleDown", None, None, "Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.", ), ( "uint16", "DelimitedSubFormulaMinHeight", None, None, "Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.", ), ( "uint16", "DisplayOperatorMinHeight", None, None, "Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.", ), ( "MathValueRecord", "MathLeading", None, None, "White space to be left between math formulas to ensure proper line spacing. 
For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.", ), ("MathValueRecord", "AxisHeight", None, None, "Axis height of the font."), ( "MathValueRecord", "AccentBaseHeight", None, None, "Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.", ), ( "MathValueRecord", "FlattenedAccentBaseHeight", None, None, "Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).", ), ( "MathValueRecord", "SubscriptShiftDown", None, None, "The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.", ), ( "MathValueRecord", "SubscriptTopMax", None, None, "Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.", ), ( "MathValueRecord", "SubscriptBaselineDropMin", None, None, "Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.", ), ( "MathValueRecord", "SuperscriptShiftUp", None, None, "Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.", ), ( "MathValueRecord", "SuperscriptShiftUpCramped", None, None, "Standard shift of superscripts relative to the base, in cramped style.", ), ( "MathValueRecord", "SuperscriptBottomMin", None, None, "Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.", ), ( "MathValueRecord", "SuperscriptBaselineDropMax", None, None, "Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.", ), ( "MathValueRecord", "SubSuperscriptGapMin", None, None, "Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.", ), ( "MathValueRecord", "SuperscriptBottomMaxWithSubscript", None, None, "The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.", ), ( "MathValueRecord", "SpaceAfterScript", None, None, "Extra white space to be added after each subscript and superscript. 
Suggested: 0.5pt for a 12 pt font.", ), ( "MathValueRecord", "UpperLimitGapMin", None, None, "Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.", ), ( "MathValueRecord", "UpperLimitBaselineRiseMin", None, None, "Minimum distance between baseline of upper limit and (ink) top of the base operator.", ), ( "MathValueRecord", "LowerLimitGapMin", None, None, "Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.", ), ( "MathValueRecord", "LowerLimitBaselineDropMin", None, None, "Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.", ), ( "MathValueRecord", "StackTopShiftUp", None, None, "Standard shift up applied to the top element of a stack.", ), ( "MathValueRecord", "StackTopDisplayStyleShiftUp", None, None, "Standard shift up applied to the top element of a stack in display style.", ), ( "MathValueRecord", "StackBottomShiftDown", None, None, "Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.", ), ( "MathValueRecord", "StackBottomDisplayStyleShiftDown", None, None, "Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.", ), ( "MathValueRecord", "StackGapMin", None, None, "Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.", ), ( "MathValueRecord", "StackDisplayStyleGapMin", None, None, "Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.", ), ( "MathValueRecord", "StretchStackTopShiftUp", None, None, "Standard shift up applied to the top element of the stretch stack.", ), ( "MathValueRecord", "StretchStackBottomShiftDown", None, None, "Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.", ), ( "MathValueRecord", "StretchStackGapAboveMin", None, None, "Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin", ), ( "MathValueRecord", "StretchStackGapBelowMin", None, None, "Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.", ), ( "MathValueRecord", "FractionNumeratorShiftUp", None, None, "Standard shift up applied to the numerator.", ), ( "MathValueRecord", "FractionNumeratorDisplayStyleShiftUp", None, None, "Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.", ), ( "MathValueRecord", "FractionDenominatorShiftDown", None, None, "Standard shift down applied to the denominator. Positive for moving in the downward direction.", ), ( "MathValueRecord", "FractionDenominatorDisplayStyleShiftDown", None, None, "Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.", ), ( "MathValueRecord", "FractionNumeratorGapMin", None, None, "Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness", ), ( "MathValueRecord", "FractionNumDisplayStyleGapMin", None, None, "Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. 
Suggested: 3x default rule thickness.", ), ( "MathValueRecord", "FractionRuleThickness", None, None, "Thickness of the fraction bar. Suggested: default rule thickness.", ), ( "MathValueRecord", "FractionDenominatorGapMin", None, None, "Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness", ), ( "MathValueRecord", "FractionDenomDisplayStyleGapMin", None, None, "Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.", ), ( "MathValueRecord", "SkewedFractionHorizontalGap", None, None, "Horizontal distance between the top and bottom elements of a skewed fraction.", ), ( "MathValueRecord", "SkewedFractionVerticalGap", None, None, "Vertical distance between the ink of the top and bottom elements of a skewed fraction.", ), ( "MathValueRecord", "OverbarVerticalGap", None, None, "Distance between the overbar and the (ink) top of the base. Suggested: 3x default rule thickness.", ), ( "MathValueRecord", "OverbarRuleThickness", None, None, "Thickness of overbar. Suggested: default rule thickness.", ), ( "MathValueRecord", "OverbarExtraAscender", None, None, "Extra white space reserved above the overbar. Suggested: default rule thickness.", ), ( "MathValueRecord", "UnderbarVerticalGap", None, None, "Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.", ), ( "MathValueRecord", "UnderbarRuleThickness", None, None, "Thickness of underbar. Suggested: default rule thickness.", ), ( "MathValueRecord", "UnderbarExtraDescender", None, None, "Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.", ), ( "MathValueRecord", "RadicalVerticalGap", None, None, "Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.", ), ( "MathValueRecord", "RadicalDisplayStyleVerticalGap", None, None, "Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.", ), ( "MathValueRecord", "RadicalRuleThickness", None, None, "Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.", ), ( "MathValueRecord", "RadicalExtraAscender", None, None, "Extra white space reserved above the radical. Suggested: RadicalRuleThickness.", ), ( "MathValueRecord", "RadicalKernBeforeDegree", None, None, "Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.", ), ( "MathValueRecord", "RadicalKernAfterDegree", None, None, "Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.", ), ( "uint16", "RadicalDegreeBottomRaisePercent", None, None, "Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.", ), ], ), ( "MathGlyphInfo", [ ( "Offset", "MathItalicsCorrectionInfo", None, None, "Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.", ), ( "Offset", "MathTopAccentAttachment", None, None, "Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.", ), ( "Offset", "ExtendedShapeCoverage", None, None, "Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table.
When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.", ), ( "Offset", "MathKernInfo", None, None, "Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.", ), ], ), ( "MathItalicsCorrectionInfo", [ ( "Offset", "Coverage", None, None, "Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.", ), ( "uint16", "ItalicsCorrectionCount", None, None, "Number of italics correction values. Should coincide with the number of covered glyphs.", ), ( "MathValueRecord", "ItalicsCorrection", "ItalicsCorrectionCount", 0, "Array of MathValueRecords defining italics correction values for each covered glyph.", ), ], ), ( "MathTopAccentAttachment", [ ( "Offset", "TopAccentCoverage", None, None, "Offset to Coverage table - from the beginning of MathTopAccentAttachment table.", ), ( "uint16", "TopAccentAttachmentCount", None, None, "Number of top accent attachment point values. Should coincide with the number of covered glyphs.", ), ( "MathValueRecord", "TopAccentAttachment", "TopAccentAttachmentCount", 0, "Array of MathValueRecords defining top accent attachment points for each covered glyph.", ), ], ), ( "MathKernInfo", [ ( "Offset", "MathKernCoverage", None, None, "Offset to Coverage table - from the beginning of the MathKernInfo table.", ), ("uint16", "MathKernCount", None, None, "Number of MathKernInfoRecords."), ( "MathKernInfoRecord", "MathKernInfoRecords", "MathKernCount", 0, "Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.", ), ], ), ( "MathKernInfoRecord", [ ( "Offset", "TopRightMathKern", None, None, "Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.", ), ( "Offset", "TopLeftMathKern", None, None, "Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.", ), ( "Offset", "BottomRightMathKern", None, None, "Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.", ), ( "Offset", "BottomLeftMathKern", None, None, "Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.", ), ], ), ( "MathKern", [ ( "uint16", "HeightCount", None, None, "Number of heights at which the kern value changes.", ), ( "MathValueRecord", "CorrectionHeight", "HeightCount", 0, "Array of correction heights at which the kern value changes. Sorted by the height value in design units.", ), ( "MathValueRecord", "KernValue", "HeightCount", 1, "Array of kern values corresponding to heights. First value is the kern value for all heights less than or equal to the first height in this table. Last value is the value to be applied for all heights greater than the last height in this table.
Negative values are interpreted as moving glyphs closer to each other.", ), ], ), ( "MathVariants", [ ( "uint16", "MinConnectorOverlap", None, None, "Minimum overlap of connecting glyphs during glyph construction, in design units.", ), ( "Offset", "VertGlyphCoverage", None, None, "Offset to Coverage table - from the beginning of MathVariants table.", ), ( "Offset", "HorizGlyphCoverage", None, None, "Offset to Coverage table - from the beginning of MathVariants table.", ), ( "uint16", "VertGlyphCount", None, None, "Number of glyphs for which information is provided for vertically growing variants.", ), ( "uint16", "HorizGlyphCount", None, None, "Number of glyphs for which information is provided for horizontally growing variants.", ), ( "Offset", "VertGlyphConstruction", "VertGlyphCount", 0, "Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.", ), ( "Offset", "HorizGlyphConstruction", "HorizGlyphCount", 0, "Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.", ), ], ), ( "MathGlyphConstruction", [ ( "Offset", "GlyphAssembly", None, None, "Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL.", ), ( "uint16", "VariantCount", None, None, "Count of glyph growing variants for this glyph.", ), ( "MathGlyphVariantRecord", "MathGlyphVariantRecord", "VariantCount", 0, "MathGlyphVariantRecords for alternative variants of the glyphs.", ), ], ), ( "MathGlyphVariantRecord", [ ("GlyphID", "VariantGlyph", None, None, "Glyph ID for the variant."), ( "uint16", "AdvanceMeasurement", None, None, "Advance width/height, in design units, of the variant, in the direction of requested glyph extension.", ), ], ), ( "GlyphAssembly", [ ( "MathValueRecord", "ItalicsCorrection", None, None, "Italics correction of this GlyphAssembly. Should not depend on the assembly size.", ), ("uint16", "PartCount", None, None, "Number of parts in this assembly."), ( "GlyphPartRecord", "PartRecords", "PartCount", 0, "Array of part records, from left to right and bottom to top.", ), ], ), ( "GlyphPartRecord", [ ("GlyphID", "glyph", None, None, "Glyph ID for the part."), ( "uint16", "StartConnectorLength", None, None, "Advance width/height of the straight bar connector material at the beginning of the glyph, in design units, in the direction of the extension.", ), ( "uint16", "EndConnectorLength", None, None, "Advance width/height of the straight bar connector material at the end of the glyph, in design units, in the direction of the extension.", ), ( "uint16", "FullAdvance", None, None, "Full advance width/height for this part, in the direction of the extension, in design units.", ), ( "uint16", "PartFlags", None, None, "Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated.
0xFFFE Reserved", ), ], ), ## ## Apple Advanced Typography (AAT) tables ## ( "AATLookupSegment", [ ("uint16", "lastGlyph", None, None, "Last glyph index in this segment."), ("uint16", "firstGlyph", None, None, "First glyph index in this segment."), ( "uint16", "value", None, None, "A 16-bit offset from the start of the table to the data.", ), ], ), # # ankr # ( "ankr", [ ("struct", "AnchorPoints", None, None, "Anchor points table."), ], ), ( "AnchorPointsFormat0", [ ("uint16", "Format", None, None, "Format of the anchor points table, = 0."), ("uint16", "Flags", None, None, "Flags. Currently unused, set to zero."), ( "AATLookupWithDataOffset(AnchorGlyphData)", "Anchors", None, None, "Table with anchor overrides for each glyph.", ), ], ), ( "AnchorGlyphData", [ ( "uint32", "AnchorPointCount", None, None, "Number of anchor points for this glyph.", ), ( "struct", "AnchorPoint", "AnchorPointCount", 0, "Individual anchor points.", ), ], ), ( "AnchorPoint", [ ("int16", "XCoordinate", None, None, "X coordinate of this anchor point."), ("int16", "YCoordinate", None, None, "Y coordinate of this anchor point."), ], ), # # bsln # ( "bsln", [ ( "Version", "Version", None, None, "Version number of the AAT baseline table (0x00010000 for the initial version).", ), ("struct", "Baseline", None, None, "Baseline table."), ], ), ( "BaselineFormat0", [ ("uint16", "Format", None, None, "Format of the baseline table, = 0."), ( "uint16", "DefaultBaseline", None, None, "Default baseline value for all glyphs. This value can be from 0 through 31.", ), ( "uint16", "Delta", 32, 0, "These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.", ), ], ), ( "BaselineFormat1", [ ("uint16", "Format", None, None, "Format of the baseline table, = 1."), ( "uint16", "DefaultBaseline", None, None, "Default baseline value for all glyphs. This value can be from 0 through 31.", ), ( "uint16", "Delta", 32, 0, "These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.", ), ( "AATLookup(uint16)", "BaselineValues", None, None, "Lookup table that maps glyphs to their baseline values.", ), ], ), ( "BaselineFormat2", [ ("uint16", "Format", None, None, "Format of the baseline table, = 2."), ( "uint16", "DefaultBaseline", None, None, "Default baseline value for all glyphs. This value can be from 0 through 31.", ), ( "GlyphID", "StandardGlyph", None, None, "Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.", ), ( "uint16", "ControlPoint", 32, 0, "Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.", ), ], ), ( "BaselineFormat3", [ ("uint16", "Format", None, None, "Format of the baseline table, = 3."), ( "uint16", "DefaultBaseline", None, None, "Default baseline value for all glyphs. This value can be from 0 through 31.", ), ( "GlyphID", "StandardGlyph", None, None, "Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.", ), ( "uint16", "ControlPoint", 32, 0, "Array of 32 control point numbers, associated with the standard glyph.
A value of 0xFFFF means there is no corresponding control point in the standard glyph.", ), ( "AATLookup(uint16)", "BaselineValues", None, None, "Lookup table that maps glyphs to their baseline values.", ), ], ), # # cidg # ( "cidg", [ ("struct", "CIDGlyphMapping", None, None, "CID-to-glyph mapping table."), ], ), ( "CIDGlyphMappingFormat0", [ ( "uint16", "Format", None, None, "Format of the CID-to-glyph mapping table, = 0.", ), ("uint16", "DataFormat", None, None, "Currently unused, set to zero."), ("uint32", "StructLength", None, None, "Size of the table in bytes."), ("uint16", "Registry", None, None, "The registry ID."), ( "char64", "RegistryName", None, None, "The registry name in ASCII; unused bytes should be set to 0.", ), ("uint16", "Order", None, None, "The order ID."), ( "char64", "OrderName", None, None, "The order name in ASCII; unused bytes should be set to 0.", ), ("uint16", "SupplementVersion", None, None, "The supplement version."), ( "CIDGlyphMap", "Mapping", None, None, "A mapping from CIDs to the glyphs in the font, starting with CID 0. If a CID from the identified collection has no glyph in the font, 0xFFFF is used.", ), ], ), # # feat # ( "feat", [ ( "Version", "Version", None, None, "Version of the feat table - initially set to 0x00010000.", ), ("FeatureNames", "FeatureNames", None, None, "The feature names."), ], ), ( "FeatureNames", [ ( "uint16", "FeatureNameCount", None, None, "Number of entries in the feature name array.", ), ("uint16", "Reserved1", None, None, "Reserved (set to zero)."), ("uint32", "Reserved2", None, None, "Reserved (set to zero)."), ( "FeatureName", "FeatureName", "FeatureNameCount", 0, "The feature name array.", ), ], ), ( "FeatureName", [ ("uint16", "FeatureType", None, None, "Feature type."), ( "uint16", "SettingsCount", None, None, "The number of records in the setting name array.", ), ( "LOffset", "Settings", None, None, "Offset to setting table for this feature.", ), ( "uint16", "FeatureFlags", None, None, "Single-bit flags associated with the feature type.", ), ( "NameID", "FeatureNameID", None, None, "The name table index for the feature name.", ), ], ), ( "Settings", [ ("Setting", "Setting", "SettingsCount", 0, "The setting array."), ], ), ( "Setting", [ ("uint16", "SettingValue", None, None, "The setting."), ( "NameID", "SettingNameID", None, None, "The name table index for the setting name.", ), ], ), # # gcid # ( "gcid", [ ("struct", "GlyphCIDMapping", None, None, "Glyph to CID mapping table."), ], ), ( "GlyphCIDMappingFormat0", [ ( "uint16", "Format", None, None, "Format of the glyph-to-CID mapping table, = 0.", ), ("uint16", "DataFormat", None, None, "Currently unused, set to zero."), ("uint32", "StructLength", None, None, "Size of the table in bytes."), ("uint16", "Registry", None, None, "The registry ID."), ( "char64", "RegistryName", None, None, "The registry name in ASCII; unused bytes should be set to 0.", ), ("uint16", "Order", None, None, "The order ID."), ( "char64", "OrderName", None, None, "The order name in ASCII; unused bytes should be set to 0.", ), ("uint16", "SupplementVersion", None, None, "The supplement version."), ( "GlyphCIDMap", "Mapping", None, None, "The CIDs for the glyphs in the font, starting with glyph 0.
If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used", ), ], ), # # lcar # ( "lcar", [ ( "Version", "Version", None, None, "Version number of the ligature caret table (0x00010000 for the initial version).", ), ("struct", "LigatureCarets", None, None, "Ligature carets table."), ], ), ( "LigatureCaretsFormat0", [ ( "uint16", "Format", None, None, "Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.", ), ( "AATLookup(LigCaretDistances)", "Carets", None, None, "Lookup table associating ligature glyphs with their caret positions, in font unit distances.", ), ], ), ( "LigatureCaretsFormat1", [ ( "uint16", "Format", None, None, "Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.", ), ( "AATLookup(LigCaretPoints)", "Carets", None, None, "Lookup table associating ligature glyphs with their caret positions, as control points.", ), ], ), ( "LigCaretDistances", [ ("uint16", "DivsionPointCount", None, None, "Number of division points."), ( "int16", "DivisionPoint", "DivsionPointCount", 0, "Distance in font units through which a subdivision is made orthogonally to the baseline.", ), ], ), ( "LigCaretPoints", [ ("uint16", "DivsionPointCount", None, None, "Number of division points."), ( "int16", "DivisionPoint", "DivsionPointCount", 0, "The number of the control point through which a subdivision is made orthogonally to the baseline.", ), ], ), # # mort # ( "mort", [ ("Version", "Version", None, None, "Version of the mort table."), ( "uint32", "MorphChainCount", None, None, "Number of metamorphosis chains.", ), ( "MortChain", "MorphChain", "MorphChainCount", 0, "Array of metamorphosis chains.", ), ], ), ( "MortChain", [ ( "Flags32", "DefaultFlags", None, None, "The default specification for subtables.", ), ( "uint32", "StructLength", None, None, "Total byte count, including this header; must be a multiple of 4.", ), ( "uint16", "MorphFeatureCount", None, None, "Number of metamorphosis feature entries.", ), ( "uint16", "MorphSubtableCount", None, None, "The number of subtables in the chain.", ), ( "struct", "MorphFeature", "MorphFeatureCount", 0, "Array of metamorphosis features.", ), ( "MortSubtable", "MorphSubtable", "MorphSubtableCount", 0, "Array of metamorphosis subtables.", ), ], ), ( "MortSubtable", [ ( "uint16", "StructLength", None, None, "Total subtable length, including this header.", ), ( "uint8", "CoverageFlags", None, None, "Most significant byte of coverage flags.", ), ("uint8", "MorphType", None, None, "Subtable type."), ( "Flags32", "SubFeatureFlags", None, None, "The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).", ), ("SubStruct", "SubStruct", None, None, "SubTable."), ], ), # # morx # ( "morx", [ ("uint16", "Version", None, None, "Version of the morx table."), ("uint16", "Reserved", None, None, "Reserved (set to zero)."), ( "uint32", "MorphChainCount", None, None, "Number of extended metamorphosis chains.", ), ( "MorxChain", "MorphChain", "MorphChainCount", 0, "Array of extended metamorphosis chains.", ), ], ), ( "MorxChain", [ ( "Flags32", "DefaultFlags", None, None, "The default specification for subtables.", ), ( "uint32", "StructLength", None, None, "Total byte count, including this header; must be a multiple of 4.", ), ( 
"uint32", "MorphFeatureCount", None, None, "Number of feature subtable entries.", ), ( "uint32", "MorphSubtableCount", None, None, "The number of subtables in the chain.", ), ( "MorphFeature", "MorphFeature", "MorphFeatureCount", 0, "Array of metamorphosis features.", ), ( "MorxSubtable", "MorphSubtable", "MorphSubtableCount", 0, "Array of extended metamorphosis subtables.", ), ], ), ( "MorphFeature", [ ("uint16", "FeatureType", None, None, "The type of feature."), ( "uint16", "FeatureSetting", None, None, "The feature's setting (aka selector).", ), ( "Flags32", "EnableFlags", None, None, "Flags for the settings that this feature and setting enables.", ), ( "Flags32", "DisableFlags", None, None, "Complement of flags for the settings that this feature and setting disable.", ), ], ), # Apple TrueType Reference Manual, chapter “The ‘morx’ table”, # section “Metamorphosis Subtables”. # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html ( "MorxSubtable", [ ( "uint32", "StructLength", None, None, "Total subtable length, including this header.", ), ( "uint8", "CoverageFlags", None, None, "Most significant byte of coverage flags.", ), ("uint16", "Reserved", None, None, "Unused."), ("uint8", "MorphType", None, None, "Subtable type."), ( "Flags32", "SubFeatureFlags", None, None, "The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).", ), ("SubStruct", "SubStruct", None, None, "SubTable."), ], ), ( "StateHeader", [ ( "uint32", "ClassCount", None, None, "Number of classes, which is the number of 16-bit entry indices in a single line in the state array.", ), ( "uint32", "MorphClass", None, None, "Offset from the start of this state table header to the start of the class table.", ), ( "uint32", "StateArrayOffset", None, None, "Offset from the start of this state table header to the start of the state array.", ), ( "uint32", "EntryTableOffset", None, None, "Offset from the start of this state table header to the start of the entry table.", ), ], ), ( "RearrangementMorph", [ ( "STXHeader(RearrangementMorphAction)", "StateTable", None, None, "Finite-state transducer table for indic rearrangement.", ), ], ), ( "ContextualMorph", [ ( "STXHeader(ContextualMorphAction)", "StateTable", None, None, "Finite-state transducer for contextual glyph substitution.", ), ], ), ( "LigatureMorph", [ ( "STXHeader(LigatureMorphAction)", "StateTable", None, None, "Finite-state transducer for ligature substitution.", ), ], ), ( "NoncontextualMorph", [ ( "AATLookup(GlyphID)", "Substitution", None, None, "The noncontextual glyph substitution table.", ), ], ), ( "InsertionMorph", [ ( "STXHeader(InsertionMorphAction)", "StateTable", None, None, "Finite-state transducer for glyph insertion.", ), ], ), ( "MorphClass", [ ( "uint16", "FirstGlyph", None, None, "Glyph index of the first glyph in the class table.", ), # ('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'), # ('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'), ], ), # If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below. 
# ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), # # prop # ( "prop", [ ( "Fixed", "Version", None, None, "Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, which is recognized by macOS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, which gets recognized by macOS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.", ), ("struct", "GlyphProperties", None, None, "Glyph properties."), ], ), ( "GlyphPropertiesFormat0", [ ("uint16", "Format", None, None, "Format, = 0."), ( "uint16", "DefaultProperties", None, None, "Default properties applied to a glyph. Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.", ), ], ), ( "GlyphPropertiesFormat1", [ ("uint16", "Format", None, None, "Format, = 1."), ( "uint16", "DefaultProperties", None, None, "Default properties applied to a glyph if that glyph is not present in the Properties lookup table.", ), ( "AATLookup(uint16)", "Properties", None, None, "Lookup data associating glyphs with their properties.", ), ], ), # # opbd # ( "opbd", [ ( "Version", "Version", None, None, "Version number of the optical bounds table (0x00010000 for the initial version).", ), ("struct", "OpticalBounds", None, None, "Optical bounds table."), ], ), ( "OpticalBoundsFormat0", [ ( "uint16", "Format", None, None, "Format of the optical bounds table, = 0.", ), ( "AATLookup(OpticalBoundsDeltas)", "OpticalBoundsDeltas", None, None, "Lookup table associating glyphs with their optical bounds, given as deltas in font units.", ), ], ), ( "OpticalBoundsFormat1", [ ( "uint16", "Format", None, None, "Format of the optical bounds table, = 1.", ), ( "AATLookup(OpticalBoundsPoints)", "OpticalBoundsPoints", None, None, "Lookup table associating glyphs with their optical bounds, given as references to control points.", ), ], ), ( "OpticalBoundsDeltas", [ ( "int16", "Left", None, None, "Delta value for the left-side optical edge.", ), ("int16", "Top", None, None, "Delta value for the top-side optical edge."), ( "int16", "Right", None, None, "Delta value for the right-side optical edge.", ), ( "int16", "Bottom", None, None, "Delta value for the bottom-side optical edge.", ), ], ), ( "OpticalBoundsPoints", [ ( "int16", "Left", None, None, "Control point index for the left-side optical edge, or -1 if this glyph has none.", ), ( "int16", "Top", None, None, "Control point index for the top-side optical edge, or -1 if this glyph has none.", ), ( "int16", "Right", None, None, "Control point index for the right-side optical edge, or -1 if this glyph has none.", ), ( "int16", "Bottom", None, None, "Control point index for the bottom-side optical edge, or -1 if this glyph has none.", ), ], ), # # TSIC # ( "TSIC", [ ( "Version", "Version", None, None, "Version of table initially set to 0x00010000.", ), ("uint16", "Flags", None, None, "TSIC flags - set to 0"), ("uint16", "AxisCount", None, None, "Axis count from fvar"), ("uint16", "RecordCount", None, None, "TSIC record count"), ("uint16", "Reserved", None, None, "Set to 0"), ("Tag", "AxisArray", "AxisCount", 0, "Array of axis tags in fvar order"), ( "LocationRecord", "RecordLocations", "RecordCount", 0, "Location in variation space of TSIC record", ), ("TSICRecord", "Record", "RecordCount", 0, "Array of TSIC records"), ], ), ( "LocationRecord", [ ("F2Dot14", 
"Axis", "AxisCount", 0, "Axis record"), ], ), ( "TSICRecord", [ ("uint16", "Flags", None, None, "Record flags - set to 0"), ("uint16", "NumCVTEntries", None, None, "Number of CVT number value pairs"), ("uint16", "NameLength", None, None, "Length of optional user record name"), ("uint16", "NameArray", "NameLength", 0, "Unicode 16 name"), ("uint16", "CVTArray", "NumCVTEntries", 0, "CVT number array"), ("int16", "CVTValueArray", "NumCVTEntries", 0, "CVT value"), ], ), # # COLR # ( "COLR", [ ("uint16", "Version", None, None, "Table version number (starts at 0)."), ( "uint16", "BaseGlyphRecordCount", None, None, "Number of Base Glyph Records.", ), ( "LOffset", "BaseGlyphRecordArray", None, None, "Offset (from beginning of COLR table) to Base Glyph records.", ), ( "LOffset", "LayerRecordArray", None, None, "Offset (from beginning of COLR table) to Layer Records.", ), ("uint16", "LayerRecordCount", None, None, "Number of Layer Records."), ( "LOffset", "BaseGlyphList", None, "Version >= 1", "Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.", ), ( "LOffset", "LayerList", None, "Version >= 1", "Offset (from beginning of COLR table) to LayerList.", ), ( "LOffset", "ClipList", None, "Version >= 1", "Offset to ClipList table (may be NULL)", ), ( "LOffsetTo(DeltaSetIndexMap)", "VarIndexMap", None, "Version >= 1", "Offset to DeltaSetIndexMap table (may be NULL)", ), ( "LOffset", "VarStore", None, "Version >= 1", "Offset to variation store (may be NULL)", ), ], ), ( "BaseGlyphRecordArray", [ ( "BaseGlyphRecord", "BaseGlyphRecord", "BaseGlyphRecordCount", 0, "Base Glyph records.", ), ], ), ( "BaseGlyphRecord", [ ( "GlyphID", "BaseGlyph", None, None, "Glyph ID of reference glyph. This glyph is for reference only and is not rendered for color.", ), ( "uint16", "FirstLayerIndex", None, None, "Index (from beginning of the Layer Records) to the layer record. 
There will be numLayers consecutive entries for this base glyph.", ), ( "uint16", "NumLayers", None, None, "Number of color layers associated with this glyph.", ), ], ), ( "LayerRecordArray", [ ("LayerRecord", "LayerRecord", "LayerRecordCount", 0, "Layer records."), ], ), ( "LayerRecord", [ ( "GlyphID", "LayerGlyph", None, None, "Glyph ID of layer glyph (must be in z-order from bottom to top).", ), ( "uint16", "PaletteIndex", None, None, "Index value to use with a selected color palette.", ), ], ), ( "BaseGlyphList", [ ( "uint32", "BaseGlyphCount", None, None, "Number of Version-1 Base Glyph records", ), ( "struct", "BaseGlyphPaintRecord", "BaseGlyphCount", 0, "Array of Version-1 Base Glyph records", ), ], ), ( "BaseGlyphPaintRecord", [ ("GlyphID", "BaseGlyph", None, None, "Glyph ID of reference glyph."), ( "LOffset", "Paint", None, None, "Offset (from beginning of BaseGlyphPaintRecord) to Paint, typically a PaintColrLayers.", ), ], ), ( "LayerList", [ ("uint32", "LayerCount", None, None, "Number of Version-1 Layers"), ( "LOffset", "Paint", "LayerCount", 0, "Array of offsets to Paint tables, from the start of the LayerList table.", ), ], ), ( "ClipListFormat1", [ ( "uint8", "Format", None, None, "Format for ClipList with 16bit glyph IDs: 1", ), ("uint32", "ClipCount", None, None, "Number of Clip records."), ( "struct", "ClipRecord", "ClipCount", 0, "Array of Clip records sorted by glyph ID.", ), ], ), ( "ClipRecord", [ ("uint16", "StartGlyphID", None, None, "First glyph ID in the range."), ("uint16", "EndGlyphID", None, None, "Last glyph ID in the range."), ("Offset24", "ClipBox", None, None, "Offset to a ClipBox table."), ], ), ( "ClipBoxFormat1", [ ( "uint8", "Format", None, None, "Format for ClipBox without variation: set to 1.", ), ("int16", "xMin", None, None, "Minimum x of clip box."), ("int16", "yMin", None, None, "Minimum y of clip box."), ("int16", "xMax", None, None, "Maximum x of clip box."), ("int16", "yMax", None, None, "Maximum y of clip box."), ], ), ( "ClipBoxFormat2", [ ("uint8", "Format", None, None, "Format for variable ClipBox: set to 2."), ("int16", "xMin", None, None, "Minimum x of clip box. VarIndexBase + 0."), ("int16", "yMin", None, None, "Minimum y of clip box. VarIndexBase + 1."), ("int16", "xMax", None, None, "Maximum x of clip box. VarIndexBase + 2."), ("int16", "yMax", None, None, "Maximum y of clip box. VarIndexBase + 3."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # COLRv1 Affine2x3 uses the same column-major order to serialize a 2D # Affine Transformation as the one used by fontTools.misc.transform. # However, for historical reasons, the labels 'xy' and 'yx' are swapped. # Their fundamental meaning is the same though. # COLRv1 Affine2x3 follows the names found in FreeType and Cairo. # In all case, the second element in the 6-tuple correspond to the # y-part of the x basis vector, and the third to the x-part of the y # basis vector. # See https://github.com/googlefonts/colr-gradients-spec/pull/85 ( "Affine2x3", [ ("Fixed", "xx", None, None, "x-part of x basis vector"), ("Fixed", "yx", None, None, "y-part of x basis vector"), ("Fixed", "xy", None, None, "x-part of y basis vector"), ("Fixed", "yy", None, None, "y-part of y basis vector"), ("Fixed", "dx", None, None, "Translation in x direction"), ("Fixed", "dy", None, None, "Translation in y direction"), ], ), ( "VarAffine2x3", [ ("Fixed", "xx", None, None, "x-part of x basis vector. VarIndexBase + 0."), ("Fixed", "yx", None, None, "y-part of x basis vector. 
VarIndexBase + 1."), ("Fixed", "xy", None, None, "x-part of y basis vector. VarIndexBase + 2."), ("Fixed", "yy", None, None, "y-part of y basis vector. VarIndexBase + 3."), ( "Fixed", "dx", None, None, "Translation in x direction. VarIndexBase + 4.", ), ( "Fixed", "dy", None, None, "Translation in y direction. VarIndexBase + 5.", ), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), ( "ColorStop", [ ("F2Dot14", "StopOffset", None, None, ""), ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."), ("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"), ], ), ( "VarColorStop", [ ("F2Dot14", "StopOffset", None, None, "VarIndexBase + 0."), ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."), ( "F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved. VarIndexBase + 1.", ), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), ( "ColorLine", [ ( "ExtendMode", "Extend", None, None, "Enum {PAD = 0, REPEAT = 1, REFLECT = 2}", ), ("uint16", "StopCount", None, None, "Number of Color stops."), ("ColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."), ], ), ( "VarColorLine", [ ( "ExtendMode", "Extend", None, None, "Enum {PAD = 0, REPEAT = 1, REFLECT = 2}", ), ("uint16", "StopCount", None, None, "Number of Color stops."), ("VarColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."), ], ), # PaintColrLayers ( "PaintFormat1", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 1"), ( "uint8", "NumLayers", None, None, "Number of offsets to Paint to read from LayerList.", ), ("uint32", "FirstLayerIndex", None, None, "Index into LayerList."), ], ), # PaintSolid ( "PaintFormat2", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 2"), ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."), ("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"), ], ), # PaintVarSolid ( "PaintFormat3", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 3"), ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."), ( "F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved. 
VarIndexBase + 0.", ), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintLinearGradient ( "PaintFormat4", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 4"), ( "Offset24", "ColorLine", None, None, "Offset (from beginning of PaintLinearGradient table) to ColorLine subtable.", ), ("int16", "x0", None, None, ""), ("int16", "y0", None, None, ""), ("int16", "x1", None, None, ""), ("int16", "y1", None, None, ""), ("int16", "x2", None, None, ""), ("int16", "y2", None, None, ""), ], ), # PaintVarLinearGradient ( "PaintFormat5", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 5"), ( "LOffset24To(VarColorLine)", "ColorLine", None, None, "Offset (from beginning of PaintVarLinearGradient table) to VarColorLine subtable.", ), ("int16", "x0", None, None, "VarIndexBase + 0."), ("int16", "y0", None, None, "VarIndexBase + 1."), ("int16", "x1", None, None, "VarIndexBase + 2."), ("int16", "y1", None, None, "VarIndexBase + 3."), ("int16", "x2", None, None, "VarIndexBase + 4."), ("int16", "y2", None, None, "VarIndexBase + 5."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintRadialGradient ( "PaintFormat6", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 6"), ( "Offset24", "ColorLine", None, None, "Offset (from beginning of PaintRadialGradient table) to ColorLine subtable.", ), ("int16", "x0", None, None, ""), ("int16", "y0", None, None, ""), ("uint16", "r0", None, None, ""), ("int16", "x1", None, None, ""), ("int16", "y1", None, None, ""), ("uint16", "r1", None, None, ""), ], ), # PaintVarRadialGradient ( "PaintFormat7", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 7"), ( "LOffset24To(VarColorLine)", "ColorLine", None, None, "Offset (from beginning of PaintVarRadialGradient table) to VarColorLine subtable.", ), ("int16", "x0", None, None, "VarIndexBase + 0."), ("int16", "y0", None, None, "VarIndexBase + 1."), ("uint16", "r0", None, None, "VarIndexBase + 2."), ("int16", "x1", None, None, "VarIndexBase + 3."), ("int16", "y1", None, None, "VarIndexBase + 4."), ("uint16", "r1", None, None, "VarIndexBase + 5."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintSweepGradient ( "PaintFormat8", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 8"), ( "Offset24", "ColorLine", None, None, "Offset (from beginning of PaintSweepGradient table) to ColorLine subtable.", ), ("int16", "centerX", None, None, "Center x coordinate."), ("int16", "centerY", None, None, "Center y coordinate."), ( "BiasedAngle", "startAngle", None, None, "Start of the angular range of the gradient.", ), ( "BiasedAngle", "endAngle", None, None, "End of the angular range of the gradient.", ), ], ), # PaintVarSweepGradient ( "PaintFormat9", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 9"), ( "LOffset24To(VarColorLine)", "ColorLine", None, None, "Offset (from beginning of PaintVarSweepGradient table) to VarColorLine subtable.", ), ("int16", "centerX", None, None, "Center x coordinate. VarIndexBase + 0."), ("int16", "centerY", None, None, "Center y coordinate. VarIndexBase + 1."), ( "BiasedAngle", "startAngle", None, None, "Start of the angular range of the gradient. VarIndexBase + 2.", ), ( "BiasedAngle", "endAngle", None, None, "End of the angular range of the gradient. 
VarIndexBase + 3.", ), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintGlyph ( "PaintFormat10", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 10"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintGlyph table) to Paint subtable.", ), ("GlyphID", "Glyph", None, None, "Glyph ID for the source outline."), ], ), # PaintColrGlyph ( "PaintFormat11", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 11"), ( "GlyphID", "Glyph", None, None, "Virtual glyph ID for a BaseGlyphList base glyph.", ), ], ), # PaintTransform ( "PaintFormat12", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 12"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintTransform table) to Paint subtable.", ), ( "LOffset24To(Affine2x3)", "Transform", None, None, "2x3 matrix for 2D affine transformations.", ), ], ), # PaintVarTransform ( "PaintFormat13", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 13"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarTransform table) to Paint subtable.", ), ( "LOffset24To(VarAffine2x3)", "Transform", None, None, "2x3 matrix for 2D affine transformations.", ), ], ), # PaintTranslate ( "PaintFormat14", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 14"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintTranslate table) to Paint subtable.", ), ("int16", "dx", None, None, "Translation in x direction."), ("int16", "dy", None, None, "Translation in y direction."), ], ), # PaintVarTranslate ( "PaintFormat15", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 15"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarTranslate table) to Paint subtable.", ), ( "int16", "dx", None, None, "Translation in x direction. VarIndexBase + 0.", ), ( "int16", "dy", None, None, "Translation in y direction. 
VarIndexBase + 1.", ), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintScale ( "PaintFormat16", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 16"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintScale table) to Paint subtable.", ), ("F2Dot14", "scaleX", None, None, ""), ("F2Dot14", "scaleY", None, None, ""), ], ), # PaintVarScale ( "PaintFormat17", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 17"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarScale table) to Paint subtable.", ), ("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."), ("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintScaleAroundCenter ( "PaintFormat18", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 18"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintScaleAroundCenter table) to Paint subtable.", ), ("F2Dot14", "scaleX", None, None, ""), ("F2Dot14", "scaleY", None, None, ""), ("int16", "centerX", None, None, ""), ("int16", "centerY", None, None, ""), ], ), # PaintVarScaleAroundCenter ( "PaintFormat19", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 19"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarScaleAroundCenter table) to Paint subtable.", ), ("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."), ("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."), ("int16", "centerX", None, None, "VarIndexBase + 2."), ("int16", "centerY", None, None, "VarIndexBase + 3."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintScaleUniform ( "PaintFormat20", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 20"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintScaleUniform table) to Paint subtable.", ), ("F2Dot14", "scale", None, None, ""), ], ), # PaintVarScaleUniform ( "PaintFormat21", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 21"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarScaleUniform table) to Paint subtable.", ), ("F2Dot14", "scale", None, None, "VarIndexBase + 0."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintScaleUniformAroundCenter ( "PaintFormat22", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 22"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintScaleUniformAroundCenter table) to Paint subtable.", ), ("F2Dot14", "scale", None, None, ""), ("int16", "centerX", None, None, ""), ("int16", "centerY", None, None, ""), ], ), # PaintVarScaleUniformAroundCenter ( "PaintFormat23", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 23"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarScaleUniformAroundCenter table) to Paint subtable.", ), ("F2Dot14", "scale", None, None, "VarIndexBase + 0"), ("int16", "centerX", None, None, "VarIndexBase + 1"), ("int16", "centerY", None, None, "VarIndexBase + 2"), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintRotate ( "PaintFormat24", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 24"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintRotate table) to Paint subtable.", ), ("Angle", "angle", None, None, ""), ], ), # PaintVarRotate ( 
"PaintFormat25", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 25"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarRotate table) to Paint subtable.", ), ("Angle", "angle", None, None, "VarIndexBase + 0."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintRotateAroundCenter ( "PaintFormat26", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 26"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintRotateAroundCenter table) to Paint subtable.", ), ("Angle", "angle", None, None, ""), ("int16", "centerX", None, None, ""), ("int16", "centerY", None, None, ""), ], ), # PaintVarRotateAroundCenter ( "PaintFormat27", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 27"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarRotateAroundCenter table) to Paint subtable.", ), ("Angle", "angle", None, None, "VarIndexBase + 0."), ("int16", "centerX", None, None, "VarIndexBase + 1."), ("int16", "centerY", None, None, "VarIndexBase + 2."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintSkew ( "PaintFormat28", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 28"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintSkew table) to Paint subtable.", ), ("Angle", "xSkewAngle", None, None, ""), ("Angle", "ySkewAngle", None, None, ""), ], ), # PaintVarSkew ( "PaintFormat29", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 29"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarSkew table) to Paint subtable.", ), ("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."), ("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintSkewAroundCenter ( "PaintFormat30", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 30"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintSkewAroundCenter table) to Paint subtable.", ), ("Angle", "xSkewAngle", None, None, ""), ("Angle", "ySkewAngle", None, None, ""), ("int16", "centerX", None, None, ""), ("int16", "centerY", None, None, ""), ], ), # PaintVarSkewAroundCenter ( "PaintFormat31", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 31"), ( "Offset24", "Paint", None, None, "Offset (from beginning of PaintVarSkewAroundCenter table) to Paint subtable.", ), ("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."), ("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."), ("int16", "centerX", None, None, "VarIndexBase + 2."), ("int16", "centerY", None, None, "VarIndexBase + 3."), ( "VarIndex", "VarIndexBase", None, None, "Base index into DeltaSetIndexMap.", ), ], ), # PaintComposite ( "PaintFormat32", [ ("uint8", "PaintFormat", None, None, "Format identifier-format = 32"), ( "LOffset24To(Paint)", "SourcePaint", None, None, "Offset (from beginning of PaintComposite table) to source Paint subtable.", ), ( "CompositeMode", "CompositeMode", None, None, "A CompositeMode enumeration value.", ), ( "LOffset24To(Paint)", "BackdropPaint", None, None, "Offset (from beginning of PaintComposite table) to backdrop Paint subtable.", ), ], ), # # avar # ( "AxisValueMap", [ ( "F2Dot14", "FromCoordinate", None, None, "A normalized coordinate value obtained using default normalization", ), ( "F2Dot14", "ToCoordinate", None, None, "The modified, normalized coordinate value", ), ], ), 
( "AxisSegmentMap", [ ( "uint16", "PositionMapCount", None, None, "The number of correspondence pairs for this axis", ), ( "AxisValueMap", "AxisValueMap", "PositionMapCount", 0, "The array of axis value map records for this axis", ), ], ), ( "avar", [ ( "Version", "Version", None, None, "Version of the avar table- 0x00010000 or 0x00020000", ), ("uint16", "Reserved", None, None, "Permanently reserved; set to zero"), ( "uint16", "AxisCount", None, None, 'The number of variation axes for this font. This must be the same number as axisCount in the "fvar" table', ), ( "AxisSegmentMap", "AxisSegmentMap", "AxisCount", 0, 'The segment maps array — one segment map for each axis, in the order of axes specified in the "fvar" table', ), ( "LOffsetTo(DeltaSetIndexMap)", "VarIdxMap", None, "Version >= 0x00020000", "", ), ("LOffset", "VarStore", None, "Version >= 0x00020000", ""), ], ), ] PKaZZZ���YYEYE"fontTools/ttLib/tables/otTables.py# coding: utf-8 """fontTools.ttLib.tables.otTables -- A collection of classes representing the various OpenType subtables. Most are constructed upon import from data in otData.py, all are populated with converter objects from otConverters.py. """ import copy from enum import IntEnum from functools import reduce from math import radians import itertools from collections import defaultdict, namedtuple from fontTools.ttLib.tables.otTraverse import dfs_base_table from fontTools.misc.arrayTools import quantizeRect from fontTools.misc.roundTools import otRound from fontTools.misc.transform import Transform, Identity from fontTools.misc.textTools import bytesjoin, pad, safeEval from fontTools.pens.boundsPen import ControlBoundsPen from fontTools.pens.transformPen import TransformPen from .otBase import ( BaseTable, FormatSwitchingBaseTable, ValueRecord, CountReference, getFormatSwitchingBaseTableClass, ) from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY import logging import struct from typing import TYPE_CHECKING, Iterator, List, Optional, Set if TYPE_CHECKING: from fontTools.ttLib.ttGlyphSet import _TTGlyphSet log = logging.getLogger(__name__) class AATStateTable(object): def __init__(self): self.GlyphClasses = {} # GlyphID --> GlyphClass self.States = [] # List of AATState, indexed by state number self.PerGlyphLookups = [] # [{GlyphID:GlyphID}, ...] 
class AATState(object): def __init__(self): self.Transitions = {} # GlyphClass --> AATAction class AATAction(object): _FLAGS = None @staticmethod def compileActions(font, states): return (None, None) def _writeFlagsToXML(self, xmlWriter): flags = [f for f in self._FLAGS if self.__dict__[f]] if flags: xmlWriter.simpletag("Flags", value=",".join(flags)) xmlWriter.newline() if self.ReservedFlags != 0: xmlWriter.simpletag("ReservedFlags", value="0x%04X" % self.ReservedFlags) xmlWriter.newline() def _setFlag(self, flag): assert flag in self._FLAGS, "unsupported flag %s" % flag self.__dict__[flag] = True class RearrangementMorphAction(AATAction): staticSize = 4 actionHeaderSize = 0 _FLAGS = ["MarkFirst", "DontAdvance", "MarkLast"] _VERBS = { 0: "no change", 1: "Ax ⇒ xA", 2: "xD ⇒ Dx", 3: "AxD ⇒ DxA", 4: "ABx ⇒ xAB", 5: "ABx ⇒ xBA", 6: "xCD ⇒ CDx", 7: "xCD ⇒ DCx", 8: "AxCD ⇒ CDxA", 9: "AxCD ⇒ DCxA", 10: "ABxD ⇒ DxAB", 11: "ABxD ⇒ DxBA", 12: "ABxCD ⇒ CDxAB", 13: "ABxCD ⇒ CDxBA", 14: "ABxCD ⇒ DCxAB", 15: "ABxCD ⇒ DCxBA", } def __init__(self): self.NewState = 0 self.Verb = 0 self.MarkFirst = False self.DontAdvance = False self.MarkLast = False self.ReservedFlags = 0 def compile(self, writer, font, actionIndex): assert actionIndex is None writer.writeUShort(self.NewState) assert self.Verb >= 0 and self.Verb <= 15, self.Verb flags = self.Verb | self.ReservedFlags if self.MarkFirst: flags |= 0x8000 if self.DontAdvance: flags |= 0x4000 if self.MarkLast: flags |= 0x2000 writer.writeUShort(flags) def decompile(self, reader, font, actionReader): assert actionReader is None self.NewState = reader.readUShort() flags = reader.readUShort() self.Verb = flags & 0xF self.MarkFirst = bool(flags & 0x8000) self.DontAdvance = bool(flags & 0x4000) self.MarkLast = bool(flags & 0x2000) self.ReservedFlags = flags & 0x1FF0 def toXML(self, xmlWriter, font, attrs, name): xmlWriter.begintag(name, **attrs) xmlWriter.newline() xmlWriter.simpletag("NewState", value=self.NewState) xmlWriter.newline() self._writeFlagsToXML(xmlWriter) xmlWriter.simpletag("Verb", value=self.Verb) verbComment = self._VERBS.get(self.Verb) if verbComment is not None: xmlWriter.comment(verbComment) xmlWriter.newline() xmlWriter.endtag(name) xmlWriter.newline() def fromXML(self, name, attrs, content, font): self.NewState = self.Verb = self.ReservedFlags = 0 self.MarkFirst = self.DontAdvance = self.MarkLast = False content = [t for t in content if isinstance(t, tuple)] for eltName, eltAttrs, eltContent in content: if eltName == "NewState": self.NewState = safeEval(eltAttrs["value"]) elif eltName == "Verb": self.Verb = safeEval(eltAttrs["value"]) elif eltName == "ReservedFlags": self.ReservedFlags = safeEval(eltAttrs["value"]) elif eltName == "Flags": for flag in eltAttrs["value"].split(","): self._setFlag(flag.strip()) class ContextualMorphAction(AATAction): staticSize = 8 actionHeaderSize = 0 _FLAGS = ["SetMark", "DontAdvance"] def __init__(self): self.NewState = 0 self.SetMark, self.DontAdvance = False, False self.ReservedFlags = 0 self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF def compile(self, writer, font, actionIndex): assert actionIndex is None writer.writeUShort(self.NewState) flags = self.ReservedFlags if self.SetMark: flags |= 0x8000 if self.DontAdvance: flags |= 0x4000 writer.writeUShort(flags) writer.writeUShort(self.MarkIndex) writer.writeUShort(self.CurrentIndex) def decompile(self, reader, font, actionReader): assert actionReader is None self.NewState = reader.readUShort() flags = reader.readUShort() self.SetMark = bool(flags & 0x8000) 
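        # Flag word layout, per the 'morx' contextual glyph substitution
        # subtable: 0x8000 setMark, 0x4000 dontAdvance; the remaining 14 bits
        # are reserved and round-tripped via ReservedFlags below.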
self.DontAdvance = bool(flags & 0x4000) self.ReservedFlags = flags & 0x3FFF self.MarkIndex = reader.readUShort() self.CurrentIndex = reader.readUShort() def toXML(self, xmlWriter, font, attrs, name): xmlWriter.begintag(name, **attrs) xmlWriter.newline() xmlWriter.simpletag("NewState", value=self.NewState) xmlWriter.newline() self._writeFlagsToXML(xmlWriter) xmlWriter.simpletag("MarkIndex", value=self.MarkIndex) xmlWriter.newline() xmlWriter.simpletag("CurrentIndex", value=self.CurrentIndex) xmlWriter.newline() xmlWriter.endtag(name) xmlWriter.newline() def fromXML(self, name, attrs, content, font): self.NewState = self.ReservedFlags = 0 self.SetMark = self.DontAdvance = False self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF content = [t for t in content if isinstance(t, tuple)] for eltName, eltAttrs, eltContent in content: if eltName == "NewState": self.NewState = safeEval(eltAttrs["value"]) elif eltName == "Flags": for flag in eltAttrs["value"].split(","): self._setFlag(flag.strip()) elif eltName == "ReservedFlags": self.ReservedFlags = safeEval(eltAttrs["value"]) elif eltName == "MarkIndex": self.MarkIndex = safeEval(eltAttrs["value"]) elif eltName == "CurrentIndex": self.CurrentIndex = safeEval(eltAttrs["value"]) class LigAction(object): def __init__(self): self.Store = False # GlyphIndexDelta is a (possibly negative) delta that gets # added to the glyph ID at the top of the AAT runtime # execution stack. It is *not* a byte offset into the # morx table. The result of the addition, which is performed # at run time by the shaping engine, is an index into # the ligature components table. See 'morx' specification. # In the AAT specification, this field is called Offset; # but its meaning is quite different from other offsets # in either AAT or OpenType, so we use a different name. self.GlyphIndexDelta = 0 class LigatureMorphAction(AATAction): staticSize = 6 # 4 bytes for each of {action,ligComponents,ligatures}Offset actionHeaderSize = 12 _FLAGS = ["SetComponent", "DontAdvance"] def __init__(self): self.NewState = 0 self.SetComponent, self.DontAdvance = False, False self.ReservedFlags = 0 self.Actions = [] def compile(self, writer, font, actionIndex): assert actionIndex is not None writer.writeUShort(self.NewState) flags = self.ReservedFlags if self.SetComponent: flags |= 0x8000 if self.DontAdvance: flags |= 0x4000 if len(self.Actions) > 0: flags |= 0x2000 writer.writeUShort(flags) if len(self.Actions) > 0: actions = self.compileLigActions() writer.writeUShort(actionIndex[actions]) else: writer.writeUShort(0) def decompile(self, reader, font, actionReader): assert actionReader is not None self.NewState = reader.readUShort() flags = reader.readUShort() self.SetComponent = bool(flags & 0x8000) self.DontAdvance = bool(flags & 0x4000) performAction = bool(flags & 0x2000) # As of 2017-09-12, the 'morx' specification says that # the reserved bitmask in ligature subtables is 0x3FFF. # However, the specification also defines a flag 0x2000, # so the reserved value should actually be 0x1FFF. # TODO: Report this specification bug to Apple. 
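        # Mask with 0x1FFF: bits 0x8000 (SetComponent), 0x4000 (DontAdvance)
        # and 0x2000 (performAction) have already been decoded above, so only
        # the remaining bits are kept as reserved.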
self.ReservedFlags = flags & 0x1FFF actionIndex = reader.readUShort() if performAction: self.Actions = self._decompileLigActions(actionReader, actionIndex) else: self.Actions = [] @staticmethod def compileActions(font, states): result, actions, actionIndex = b"", set(), {} for state in states: for _glyphClass, trans in state.Transitions.items(): actions.add(trans.compileLigActions()) # Sort the compiled actions in decreasing order of # length, so that the longer sequence come before the # shorter ones. For each compiled action ABCD, its # suffixes BCD, CD, and D do not be encoded separately # (in case they occur); instead, we can just store an # index that points into the middle of the longer # sequence. Every compiled AAT ligature sequence is # terminated with an end-of-sequence flag, which can # only be set on the last element of the sequence. # Therefore, it is sufficient to consider just the # suffixes. for a in sorted(actions, key=lambda x: (-len(x), x)): if a not in actionIndex: for i in range(0, len(a), 4): suffix = a[i:] suffixIndex = (len(result) + i) // 4 actionIndex.setdefault(suffix, suffixIndex) result += a result = pad(result, 4) return (result, actionIndex) def compileLigActions(self): result = [] for i, action in enumerate(self.Actions): last = i == len(self.Actions) - 1 value = action.GlyphIndexDelta & 0x3FFFFFFF value |= 0x80000000 if last else 0 value |= 0x40000000 if action.Store else 0 result.append(struct.pack(">L", value)) return bytesjoin(result) def _decompileLigActions(self, actionReader, actionIndex): actions = [] last = False reader = actionReader.getSubReader(actionReader.pos + actionIndex * 4) while not last: value = reader.readULong() last = bool(value & 0x80000000) action = LigAction() actions.append(action) action.Store = bool(value & 0x40000000) delta = value & 0x3FFFFFFF if delta >= 0x20000000: # sign-extend 30-bit value delta = -0x40000000 + delta action.GlyphIndexDelta = delta return actions def fromXML(self, name, attrs, content, font): self.NewState = self.ReservedFlags = 0 self.SetComponent = self.DontAdvance = False self.ReservedFlags = 0 self.Actions = [] content = [t for t in content if isinstance(t, tuple)] for eltName, eltAttrs, eltContent in content: if eltName == "NewState": self.NewState = safeEval(eltAttrs["value"]) elif eltName == "Flags": for flag in eltAttrs["value"].split(","): self._setFlag(flag.strip()) elif eltName == "ReservedFlags": self.ReservedFlags = safeEval(eltAttrs["value"]) elif eltName == "Action": action = LigAction() flags = eltAttrs.get("Flags", "").split(",") flags = [f.strip() for f in flags] action.Store = "Store" in flags action.GlyphIndexDelta = safeEval(eltAttrs["GlyphIndexDelta"]) self.Actions.append(action) def toXML(self, xmlWriter, font, attrs, name): xmlWriter.begintag(name, **attrs) xmlWriter.newline() xmlWriter.simpletag("NewState", value=self.NewState) xmlWriter.newline() self._writeFlagsToXML(xmlWriter) for action in self.Actions: attribs = [("GlyphIndexDelta", action.GlyphIndexDelta)] if action.Store: attribs.append(("Flags", "Store")) xmlWriter.simpletag("Action", attribs) xmlWriter.newline() xmlWriter.endtag(name) xmlWriter.newline() class InsertionMorphAction(AATAction): staticSize = 8 actionHeaderSize = 4 # 4 bytes for actionOffset _FLAGS = [ "SetMark", "DontAdvance", "CurrentIsKashidaLike", "MarkedIsKashidaLike", "CurrentInsertBefore", "MarkedInsertBefore", ] def __init__(self): self.NewState = 0 for flag in self._FLAGS: setattr(self, flag, False) self.ReservedFlags = 0 self.CurrentInsertionAction, 
self.MarkedInsertionAction = [], [] def compile(self, writer, font, actionIndex): assert actionIndex is not None writer.writeUShort(self.NewState) flags = self.ReservedFlags if self.SetMark: flags |= 0x8000 if self.DontAdvance: flags |= 0x4000 if self.CurrentIsKashidaLike: flags |= 0x2000 if self.MarkedIsKashidaLike: flags |= 0x1000 if self.CurrentInsertBefore: flags |= 0x0800 if self.MarkedInsertBefore: flags |= 0x0400 flags |= len(self.CurrentInsertionAction) << 5 flags |= len(self.MarkedInsertionAction) writer.writeUShort(flags) if len(self.CurrentInsertionAction) > 0: currentIndex = actionIndex[tuple(self.CurrentInsertionAction)] else: currentIndex = 0xFFFF writer.writeUShort(currentIndex) if len(self.MarkedInsertionAction) > 0: markedIndex = actionIndex[tuple(self.MarkedInsertionAction)] else: markedIndex = 0xFFFF writer.writeUShort(markedIndex) def decompile(self, reader, font, actionReader): assert actionReader is not None self.NewState = reader.readUShort() flags = reader.readUShort() self.SetMark = bool(flags & 0x8000) self.DontAdvance = bool(flags & 0x4000) self.CurrentIsKashidaLike = bool(flags & 0x2000) self.MarkedIsKashidaLike = bool(flags & 0x1000) self.CurrentInsertBefore = bool(flags & 0x0800) self.MarkedInsertBefore = bool(flags & 0x0400) self.CurrentInsertionAction = self._decompileInsertionAction( actionReader, font, index=reader.readUShort(), count=((flags & 0x03E0) >> 5) ) self.MarkedInsertionAction = self._decompileInsertionAction( actionReader, font, index=reader.readUShort(), count=(flags & 0x001F) ) def _decompileInsertionAction(self, actionReader, font, index, count): if index == 0xFFFF or count == 0: return [] reader = actionReader.getSubReader(actionReader.pos + index * 2) return font.getGlyphNameMany(reader.readUShortArray(count)) def toXML(self, xmlWriter, font, attrs, name): xmlWriter.begintag(name, **attrs) xmlWriter.newline() xmlWriter.simpletag("NewState", value=self.NewState) xmlWriter.newline() self._writeFlagsToXML(xmlWriter) for g in self.CurrentInsertionAction: xmlWriter.simpletag("CurrentInsertionAction", glyph=g) xmlWriter.newline() for g in self.MarkedInsertionAction: xmlWriter.simpletag("MarkedInsertionAction", glyph=g) xmlWriter.newline() xmlWriter.endtag(name) xmlWriter.newline() def fromXML(self, name, attrs, content, font): self.__init__() content = [t for t in content if isinstance(t, tuple)] for eltName, eltAttrs, eltContent in content: if eltName == "NewState": self.NewState = safeEval(eltAttrs["value"]) elif eltName == "Flags": for flag in eltAttrs["value"].split(","): self._setFlag(flag.strip()) elif eltName == "CurrentInsertionAction": self.CurrentInsertionAction.append(eltAttrs["glyph"]) elif eltName == "MarkedInsertionAction": self.MarkedInsertionAction.append(eltAttrs["glyph"]) else: assert False, eltName @staticmethod def compileActions(font, states): actions, actionIndex, result = set(), {}, b"" for state in states: for _glyphClass, trans in state.Transitions.items(): if trans.CurrentInsertionAction is not None: actions.add(tuple(trans.CurrentInsertionAction)) if trans.MarkedInsertionAction is not None: actions.add(tuple(trans.MarkedInsertionAction)) # Sort the compiled actions in decreasing order of # length, so that the longer sequences come before the # shorter ones. for action in sorted(actions, key=lambda x: (-len(x), x)): # We insert all sub-sequences of the action glyph sequence # into actionIndex.
For example, if one action triggers on # glyph sequence [A, B, C, D, E] and another action triggers # on [C, D], we return result=[A, B, C, D, E] (as list of # encoded glyph IDs), and actionIndex={('A','B','C','D','E'): 0, # ('C','D'): 2}. if action in actionIndex: continue for start in range(0, len(action)): startIndex = (len(result) // 2) + start for limit in range(start, len(action)): glyphs = action[start : limit + 1] actionIndex.setdefault(glyphs, startIndex) for glyph in action: glyphID = font.getGlyphID(glyph) result += struct.pack(">H", glyphID) return result, actionIndex class FeatureParams(BaseTable): def compile(self, writer, font): assert ( featureParamTypes.get(writer["FeatureTag"]) == self.__class__ ), "Wrong FeatureParams type for feature '%s': %s" % ( writer["FeatureTag"], self.__class__.__name__, ) BaseTable.compile(self, writer, font) def toXML(self, xmlWriter, font, attrs=None, name=None): BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) class FeatureParamsSize(FeatureParams): pass class FeatureParamsStylisticSet(FeatureParams): pass class FeatureParamsCharacterVariants(FeatureParams): pass class Coverage(FormatSwitchingBaseTable): # manual implementation to get rid of glyphID dependencies def populateDefaults(self, propagator=None): if not hasattr(self, "glyphs"): self.glyphs = [] def postRead(self, rawTable, font): if self.Format == 1: self.glyphs = rawTable["GlyphArray"] elif self.Format == 2: glyphs = self.glyphs = [] ranges = rawTable["RangeRecord"] # Some SIL fonts have coverage entries that don't have sorted # StartCoverageIndex. If it is so, fixup and warn. We undo # this when writing font out. sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex) if ranges != sorted_ranges: log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.") ranges = sorted_ranges del sorted_ranges for r in ranges: start = r.Start end = r.End startID = font.getGlyphID(start) endID = font.getGlyphID(end) + 1 glyphs.extend(font.getGlyphNameMany(range(startID, endID))) else: self.glyphs = [] log.warning("Unknown Coverage format: %s", self.Format) del self.Format # Don't need this anymore def preWrite(self, font): glyphs = getattr(self, "glyphs", None) if glyphs is None: glyphs = self.glyphs = [] format = 1 rawTable = {"GlyphArray": glyphs} if glyphs: # find out whether Format 2 is more compact or not glyphIDs = font.getGlyphIDMany(glyphs) brokenOrder = sorted(glyphIDs) != glyphIDs last = glyphIDs[0] ranges = [[last]] for glyphID in glyphIDs[1:]: if glyphID != last + 1: ranges[-1].append(last) ranges.append([glyphID]) last = glyphID ranges[-1].append(last) if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 
1 word # Format 2 is more compact index = 0 for i in range(len(ranges)): start, end = ranges[i] r = RangeRecord() r.StartID = start r.Start = font.getGlyphName(start) r.End = font.getGlyphName(end) r.StartCoverageIndex = index ranges[i] = r index = index + end - start + 1 if brokenOrder: log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.") ranges.sort(key=lambda a: a.StartID) for r in ranges: del r.StartID format = 2 rawTable = {"RangeRecord": ranges} # else: # fallthrough; Format 1 is more compact self.Format = format return rawTable def toXML2(self, xmlWriter, font): for glyphName in getattr(self, "glyphs", []): xmlWriter.simpletag("Glyph", value=glyphName) xmlWriter.newline() def fromXML(self, name, attrs, content, font): glyphs = getattr(self, "glyphs", None) if glyphs is None: glyphs = [] self.glyphs = glyphs glyphs.append(attrs["value"]) # The special 0xFFFFFFFF delta-set index is used to indicate that there # is no variation data in the ItemVariationStore for a given variable field NO_VARIATION_INDEX = 0xFFFFFFFF class DeltaSetIndexMap(getFormatSwitchingBaseTableClass("uint8")): def populateDefaults(self, propagator=None): if not hasattr(self, "mapping"): self.mapping = [] def postRead(self, rawTable, font): assert (rawTable["EntryFormat"] & 0xFFC0) == 0 self.mapping = rawTable["mapping"] @staticmethod def getEntryFormat(mapping): ored = 0 for idx in mapping: ored |= idx inner = ored & 0xFFFF innerBits = 0 while inner: innerBits += 1 inner >>= 1 innerBits = max(innerBits, 1) assert innerBits <= 16 ored = (ored >> (16 - innerBits)) | (ored & ((1 << innerBits) - 1)) if ored <= 0x000000FF: entrySize = 1 elif ored <= 0x0000FFFF: entrySize = 2 elif ored <= 0x00FFFFFF: entrySize = 3 else: entrySize = 4 return ((entrySize - 1) << 4) | (innerBits - 1) def preWrite(self, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = self.mapping = [] self.Format = 1 if len(mapping) > 0xFFFF else 0 rawTable = self.__dict__.copy() rawTable["MappingCount"] = len(mapping) rawTable["EntryFormat"] = self.getEntryFormat(mapping) return rawTable def toXML2(self, xmlWriter, font): # Make xml dump less verbose, by omitting no-op entries like: # <Map index="..." 
outer="65535" inner="65535"/> xmlWriter.comment("Omitted values default to 0xFFFF/0xFFFF (no variations)") xmlWriter.newline() for i, value in enumerate(getattr(self, "mapping", [])): attrs = [("index", i)] if value != NO_VARIATION_INDEX: attrs.extend( [ ("outer", value >> 16), ("inner", value & 0xFFFF), ] ) xmlWriter.simpletag("Map", attrs) xmlWriter.newline() def fromXML(self, name, attrs, content, font): mapping = getattr(self, "mapping", None) if mapping is None: self.mapping = mapping = [] index = safeEval(attrs["index"]) outer = safeEval(attrs.get("outer", "0xFFFF")) inner = safeEval(attrs.get("inner", "0xFFFF")) assert inner <= 0xFFFF mapping.insert(index, (outer << 16) | inner) class VarIdxMap(BaseTable): def populateDefaults(self, propagator=None): if not hasattr(self, "mapping"): self.mapping = {} def postRead(self, rawTable, font): assert (rawTable["EntryFormat"] & 0xFFC0) == 0 glyphOrder = font.getGlyphOrder() mapList = rawTable["mapping"] mapList.extend([mapList[-1]] * (len(glyphOrder) - len(mapList))) self.mapping = dict(zip(glyphOrder, mapList)) def preWrite(self, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = self.mapping = {} glyphOrder = font.getGlyphOrder() mapping = [mapping[g] for g in glyphOrder] while len(mapping) > 1 and mapping[-2] == mapping[-1]: del mapping[-1] rawTable = {"mapping": mapping} rawTable["MappingCount"] = len(mapping) rawTable["EntryFormat"] = DeltaSetIndexMap.getEntryFormat(mapping) return rawTable def toXML2(self, xmlWriter, font): for glyph, value in sorted(getattr(self, "mapping", {}).items()): attrs = ( ("glyph", glyph), ("outer", value >> 16), ("inner", value & 0xFFFF), ) xmlWriter.simpletag("Map", attrs) xmlWriter.newline() def fromXML(self, name, attrs, content, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = {} self.mapping = mapping try: glyph = attrs["glyph"] except: # https://github.com/fonttools/fonttools/commit/21cbab8ce9ded3356fef3745122da64dcaf314e9#commitcomment-27649836 glyph = font.getGlyphOrder()[attrs["index"]] outer = safeEval(attrs["outer"]) inner = safeEval(attrs["inner"]) assert inner <= 0xFFFF mapping[glyph] = (outer << 16) | inner class VarRegionList(BaseTable): def preWrite(self, font): # The OT spec says VarStore.VarRegionList.RegionAxisCount should always # be equal to the fvar.axisCount, and OTS < v8.0.0 enforces this rule # even when the VarRegionList is empty. We can't treat RegionAxisCount # like a normal propagated count (== len(Region[i].VarRegionAxis)), # otherwise it would default to 0 if VarRegionList is empty. # Thus, we force it to always be equal to fvar.axisCount. 
# https://github.com/khaledhosny/ots/pull/192 fvarTable = font.get("fvar") if fvarTable: self.RegionAxisCount = len(fvarTable.axes) return { **self.__dict__, "RegionAxisCount": CountReference(self.__dict__, "RegionAxisCount"), } class SingleSubst(FormatSwitchingBaseTable): def populateDefaults(self, propagator=None): if not hasattr(self, "mapping"): self.mapping = {} def postRead(self, rawTable, font): mapping = {} input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) if self.Format == 1: delta = rawTable["DeltaGlyphID"] inputGIDS = font.getGlyphIDMany(input) outGIDS = [(glyphID + delta) % 65536 for glyphID in inputGIDS] outNames = font.getGlyphNameMany(outGIDS) for inp, out in zip(input, outNames): mapping[inp] = out elif self.Format == 2: assert ( len(input) == rawTable["GlyphCount"] ), "invalid SingleSubstFormat2 table" subst = rawTable["Substitute"] for inp, sub in zip(input, subst): mapping[inp] = sub else: assert 0, "unknown format: %s" % self.Format self.mapping = mapping del self.Format # Don't need this anymore def preWrite(self, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = self.mapping = {} items = list(mapping.items()) getGlyphID = font.getGlyphID gidItems = [(getGlyphID(a), getGlyphID(b)) for a, b in items] sortableItems = sorted(zip(gidItems, items)) # figure out format format = 2 delta = None for inID, outID in gidItems: if delta is None: delta = (outID - inID) % 65536 if (inID + delta) % 65536 != outID: break else: if delta is None: # the mapping is empty, better use format 2 format = 2 else: format = 1 rawTable = {} self.Format = format cov = Coverage() input = [item[1][0] for item in sortableItems] subst = [item[1][1] for item in sortableItems] cov.glyphs = input rawTable["Coverage"] = cov if format == 1: assert delta is not None rawTable["DeltaGlyphID"] = delta else: rawTable["Substitute"] = subst return rawTable def toXML2(self, xmlWriter, font): items = sorted(self.mapping.items()) for inGlyph, outGlyph in items: xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", outGlyph)]) xmlWriter.newline() def fromXML(self, name, attrs, content, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = {} self.mapping = mapping mapping[attrs["in"]] = attrs["out"] class MultipleSubst(FormatSwitchingBaseTable): def populateDefaults(self, propagator=None): if not hasattr(self, "mapping"): self.mapping = {} def postRead(self, rawTable, font): mapping = {} if self.Format == 1: glyphs = _getGlyphsFromCoverageTable(rawTable["Coverage"]) subst = [s.Substitute for s in rawTable["Sequence"]] mapping = dict(zip(glyphs, subst)) else: assert 0, "unknown format: %s" % self.Format self.mapping = mapping del self.Format # Don't need this anymore def preWrite(self, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = self.mapping = {} cov = Coverage() cov.glyphs = sorted(list(mapping.keys()), key=font.getGlyphID) self.Format = 1 rawTable = { "Coverage": cov, "Sequence": [self.makeSequence_(mapping[glyph]) for glyph in cov.glyphs], } return rawTable def toXML2(self, xmlWriter, font): items = sorted(self.mapping.items()) for inGlyph, outGlyphs in items: out = ",".join(outGlyphs) xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", out)]) xmlWriter.newline() def fromXML(self, name, attrs, content, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = {} self.mapping = mapping # TTX v3.0 and earlier. 
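# A sketch of the two element shapes this method accepts, inferred from the
# branches below (glyph names are made up):
#
#     <!-- TTX v3.0 and earlier -->
#     <Coverage>
#       <Glyph value="f_f_i"/>
#     </Coverage>
#     <Sequence index="0">
#       <Substitute value="f"/>
#       <Substitute value="f"/>
#       <Substitute value="i"/>
#     </Sequence>
#
#     <!-- TTX v3.1 and later -->
#     <Substitution in="f_f_i" out="f,f,i"/>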
if name == "Coverage": self.old_coverage_ = [] for element in content: if not isinstance(element, tuple): continue element_name, element_attrs, _ = element if element_name == "Glyph": self.old_coverage_.append(element_attrs["value"]) return if name == "Sequence": index = int(attrs.get("index", len(mapping))) glyph = self.old_coverage_[index] glyph_mapping = mapping[glyph] = [] for element in content: if not isinstance(element, tuple): continue element_name, element_attrs, _ = element if element_name == "Substitute": glyph_mapping.append(element_attrs["value"]) return # TTX v3.1 and later. outGlyphs = attrs["out"].split(",") if attrs["out"] else [] mapping[attrs["in"]] = [g.strip() for g in outGlyphs] @staticmethod def makeSequence_(g): seq = Sequence() seq.Substitute = g return seq class ClassDef(FormatSwitchingBaseTable): def populateDefaults(self, propagator=None): if not hasattr(self, "classDefs"): self.classDefs = {} def postRead(self, rawTable, font): classDefs = {} if self.Format == 1: start = rawTable["StartGlyph"] classList = rawTable["ClassValueArray"] startID = font.getGlyphID(start) endID = startID + len(classList) glyphNames = font.getGlyphNameMany(range(startID, endID)) for glyphName, cls in zip(glyphNames, classList): if cls: classDefs[glyphName] = cls elif self.Format == 2: records = rawTable["ClassRangeRecord"] for rec in records: cls = rec.Class if not cls: continue start = rec.Start end = rec.End startID = font.getGlyphID(start) endID = font.getGlyphID(end) + 1 glyphNames = font.getGlyphNameMany(range(startID, endID)) for glyphName in glyphNames: classDefs[glyphName] = cls else: log.warning("Unknown ClassDef format: %s", self.Format) self.classDefs = classDefs del self.Format # Don't need this anymore def _getClassRanges(self, font): classDefs = getattr(self, "classDefs", None) if classDefs is None: self.classDefs = {} return getGlyphID = font.getGlyphID items = [] for glyphName, cls in classDefs.items(): if not cls: continue items.append((getGlyphID(glyphName), glyphName, cls)) if items: items.sort() last, lastName, lastCls = items[0] ranges = [[lastCls, last, lastName]] for glyphID, glyphName, cls in items[1:]: if glyphID != last + 1 or cls != lastCls: ranges[-1].extend([last, lastName]) ranges.append([cls, glyphID, glyphName]) last = glyphID lastName = glyphName lastCls = cls ranges[-1].extend([last, lastName]) return ranges def preWrite(self, font): format = 2 rawTable = {"ClassRangeRecord": []} ranges = self._getClassRanges(font) if ranges: startGlyph = ranges[0][1] endGlyph = ranges[-1][3] glyphCount = endGlyph - startGlyph + 1 if len(ranges) * 3 < glyphCount + 1: # Format 2 is more compact for i in range(len(ranges)): cls, start, startName, end, endName = ranges[i] rec = ClassRangeRecord() rec.Start = startName rec.End = endName rec.Class = cls ranges[i] = rec format = 2 rawTable = {"ClassRangeRecord": ranges} else: # Format 1 is more compact startGlyphName = ranges[0][2] classes = [0] * glyphCount for cls, start, startName, end, endName in ranges: for g in range(start - startGlyph, end - startGlyph + 1): classes[g] = cls format = 1 rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes} self.Format = format return rawTable def toXML2(self, xmlWriter, font): items = sorted(self.classDefs.items()) for glyphName, cls in items: xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)]) xmlWriter.newline() def fromXML(self, name, attrs, content, font): classDefs = getattr(self, "classDefs", None) if classDefs is None: classDefs = {} 
self.classDefs = classDefs classDefs[attrs["glyph"]] = int(attrs["class"]) class AlternateSubst(FormatSwitchingBaseTable): def populateDefaults(self, propagator=None): if not hasattr(self, "alternates"): self.alternates = {} def postRead(self, rawTable, font): alternates = {} if self.Format == 1: input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) alts = rawTable["AlternateSet"] assert len(input) == len(alts) for inp, alt in zip(input, alts): alternates[inp] = alt.Alternate else: assert 0, "unknown format: %s" % self.Format self.alternates = alternates del self.Format # Don't need this anymore def preWrite(self, font): self.Format = 1 alternates = getattr(self, "alternates", None) if alternates is None: alternates = self.alternates = {} items = list(alternates.items()) for i in range(len(items)): glyphName, set = items[i] items[i] = font.getGlyphID(glyphName), glyphName, set items.sort() cov = Coverage() cov.glyphs = [item[1] for item in items] alternates = [] setList = [item[-1] for item in items] for set in setList: alts = AlternateSet() alts.Alternate = set alternates.append(alts) # a special case to deal with the fact that several hundred Adobe Japan1-5 # CJK fonts will overflow an offset if the coverage table isn't pushed to the end. # Also useful in that when splitting a sub-table because of an offset overflow # I don't need to calculate the change in the subtable offset due to the change in the coverage table size. # Allows packing more rules in subtable. self.sortCoverageLast = 1 return {"Coverage": cov, "AlternateSet": alternates} def toXML2(self, xmlWriter, font): items = sorted(self.alternates.items()) for glyphName, alternates in items: xmlWriter.begintag("AlternateSet", glyph=glyphName) xmlWriter.newline() for alt in alternates: xmlWriter.simpletag("Alternate", glyph=alt) xmlWriter.newline() xmlWriter.endtag("AlternateSet") xmlWriter.newline() def fromXML(self, name, attrs, content, font): alternates = getattr(self, "alternates", None) if alternates is None: alternates = {} self.alternates = alternates glyphName = attrs["glyph"] set = [] alternates[glyphName] = set for element in content: if not isinstance(element, tuple): continue name, attrs, content = element set.append(attrs["glyph"]) class LigatureSubst(FormatSwitchingBaseTable): def populateDefaults(self, propagator=None): if not hasattr(self, "ligatures"): self.ligatures = {} def postRead(self, rawTable, font): ligatures = {} if self.Format == 1: input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) ligSets = rawTable["LigatureSet"] assert len(input) == len(ligSets) for i in range(len(input)): ligatures[input[i]] = ligSets[i].Ligature else: assert 0, "unknown format: %s" % self.Format self.ligatures = ligatures del self.Format # Don't need this anymore @staticmethod def _getLigatureSortKey(components): # Computes a key for ordering ligatures in a GSUB Type-4 lookup. # When building the OpenType lookup, we need to make sure that # the longest sequence of components is listed first, so we # use the negative length as the key for sorting. # Note, we no longer need to worry about deterministic order because the # ligature mapping `dict` remembers the insertion order, and this in # turn depends on the order in which the ligatures are written in the FEA. # Since python sort algorithm is stable, the ligatures of equal length # will keep the relative order in which they appear in the feature file. 
# For example, given the following ligatures (all starting with 'f' and # thus belonging to the same LigatureSet): # # feature liga { # sub f i by f_i; # sub f f f by f_f_f; # sub f f by f_f; # sub f f i by f_f_i; # } liga; # # this should sort to: f_f_f, f_f_i, f_i, f_f # This is also what fea-rs does, see: # https://github.com/adobe-type-tools/afdko/issues/1727 # https://github.com/fonttools/fonttools/issues/3428 # https://github.com/googlefonts/fontc/pull/680 return -len(components) def preWrite(self, font): self.Format = 1 ligatures = getattr(self, "ligatures", None) if ligatures is None: ligatures = self.ligatures = {} if ligatures and isinstance(next(iter(ligatures)), tuple): # New high-level API in v3.1 and later. Note that we just support compiling this # for now. We don't load into this API, and don't do XML with it. # ligatures is map from components-sequence to lig-glyph newLigatures = dict() for comps in sorted(ligatures.keys(), key=self._getLigatureSortKey): ligature = Ligature() ligature.Component = comps[1:] ligature.CompCount = len(comps) ligature.LigGlyph = ligatures[comps] newLigatures.setdefault(comps[0], []).append(ligature) ligatures = newLigatures items = list(ligatures.items()) for i in range(len(items)): glyphName, set = items[i] items[i] = font.getGlyphID(glyphName), glyphName, set items.sort() cov = Coverage() cov.glyphs = [item[1] for item in items] ligSets = [] setList = [item[-1] for item in items] for set in setList: ligSet = LigatureSet() ligs = ligSet.Ligature = [] for lig in set: ligs.append(lig) ligSets.append(ligSet) # Useful in that when splitting a sub-table because of an offset overflow # I don't need to calculate the change in subtable offset due to the coverage table size. # Allows packing more rules in subtable. self.sortCoverageLast = 1 return {"Coverage": cov, "LigatureSet": ligSets} def toXML2(self, xmlWriter, font): items = sorted(self.ligatures.items()) for glyphName, ligSets in items: xmlWriter.begintag("LigatureSet", glyph=glyphName) xmlWriter.newline() for lig in ligSets: xmlWriter.simpletag( "Ligature", glyph=lig.LigGlyph, components=",".join(lig.Component) ) xmlWriter.newline() xmlWriter.endtag("LigatureSet") xmlWriter.newline() def fromXML(self, name, attrs, content, font): ligatures = getattr(self, "ligatures", None) if ligatures is None: ligatures = {} self.ligatures = ligatures glyphName = attrs["glyph"] ligs = [] ligatures[glyphName] = ligs for element in content: if not isinstance(element, tuple): continue name, attrs, content = element lig = Ligature() lig.LigGlyph = attrs["glyph"] components = attrs["components"] lig.Component = components.split(",") if components else [] lig.CompCount = len(lig.Component) ligs.append(lig) class COLR(BaseTable): def decompile(self, reader, font): # COLRv0 is exceptional in that LayerRecordCount appears *after* the # LayerRecordArray it counts, but the parser logic expects Count fields # to always precede the arrays. Here we work around this by parsing the # LayerRecordCount before the rest of the table, and storing it in # the reader's local state.
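# For reference, the fixed-size COLRv0 header fields are laid out as:
#
#     Version               uint16
#     BaseGlyphRecordCount  uint16
#     BaseGlyphRecordArray  Offset32
#     LayerRecordArray      Offset32
#     LayerRecordCount      uint16   <-- counts the array that precedes it
#
# which is why the loop below can reach LayerRecordCount simply by advancing
# past the staticSize of each converter before it.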
subReader = reader.getSubReader(offset=0) for conv in self.getConverters(): if conv.name != "LayerRecordCount": subReader.advance(conv.staticSize) continue reader[conv.name] = conv.read(subReader, font, tableDict={}) break else: raise AssertionError("LayerRecordCount converter not found") return BaseTable.decompile(self, reader, font) def preWrite(self, font): # The writer similarly assumes Count values precede the things counted, # thus here we pre-initialize a CountReference; the actual count value # will be set to the length of the array by the time this is assembled. self.LayerRecordCount = None return { **self.__dict__, "LayerRecordCount": CountReference(self.__dict__, "LayerRecordCount"), } def computeClipBoxes(self, glyphSet: "_TTGlyphSet", quantization: int = 1): if self.Version == 0: return clips = {} for rec in self.BaseGlyphList.BaseGlyphPaintRecord: try: clipBox = rec.Paint.computeClipBox(self, glyphSet, quantization) except Exception as e: from fontTools.ttLib import TTLibError raise TTLibError( f"Failed to compute COLR ClipBox for {rec.BaseGlyph!r}" ) from e if clipBox is not None: clips[rec.BaseGlyph] = clipBox hasClipList = hasattr(self, "ClipList") and self.ClipList is not None if not clips: if hasClipList: self.ClipList = None else: if not hasClipList: self.ClipList = ClipList() self.ClipList.Format = 1 self.ClipList.clips = clips class LookupList(BaseTable): @property def table(self): for l in self.Lookup: for st in l.SubTable: if type(st).__name__.endswith("Subst"): return "GSUB" if type(st).__name__.endswith("Pos"): return "GPOS" raise ValueError def toXML2(self, xmlWriter, font): if ( not font or "Debg" not in font or LOOKUP_DEBUG_INFO_KEY not in font["Debg"].data ): return super().toXML2(xmlWriter, font) debugData = font["Debg"].data[LOOKUP_DEBUG_INFO_KEY][self.table] for conv in self.getConverters(): if conv.repeat: value = getattr(self, conv.name, []) for lookupIndex, item in enumerate(value): if str(lookupIndex) in debugData: info = LookupDebugInfo(*debugData[str(lookupIndex)]) tag = info.location if info.name: tag = f"{info.name}: {tag}" if info.feature: script, language, feature = info.feature tag = f"{tag} in {feature} ({script}/{language})" xmlWriter.comment(tag) xmlWriter.newline() conv.xmlWrite( xmlWriter, font, item, conv.name, [("index", lookupIndex)] ) else: if conv.aux and not eval(conv.aux, None, vars(self)): continue value = getattr( self, conv.name, None ) # TODO Handle defaults instead of defaulting to None!
conv.xmlWrite(xmlWriter, font, value, conv.name, []) class BaseGlyphRecordArray(BaseTable): def preWrite(self, font): self.BaseGlyphRecord = sorted( self.BaseGlyphRecord, key=lambda rec: font.getGlyphID(rec.BaseGlyph) ) return self.__dict__.copy() class BaseGlyphList(BaseTable): def preWrite(self, font): self.BaseGlyphPaintRecord = sorted( self.BaseGlyphPaintRecord, key=lambda rec: font.getGlyphID(rec.BaseGlyph) ) return self.__dict__.copy() class ClipBoxFormat(IntEnum): Static = 1 Variable = 2 def is_variable(self): return self is self.Variable def as_variable(self): return self.Variable class ClipBox(getFormatSwitchingBaseTableClass("uint8")): formatEnum = ClipBoxFormat def as_tuple(self): return tuple(getattr(self, conv.name) for conv in self.getConverters()) def __repr__(self): return f"{self.__class__.__name__}{self.as_tuple()}" class ClipList(getFormatSwitchingBaseTableClass("uint8")): def populateDefaults(self, propagator=None): if not hasattr(self, "clips"): self.clips = {} def postRead(self, rawTable, font): clips = {} glyphOrder = font.getGlyphOrder() for i, rec in enumerate(rawTable["ClipRecord"]): if rec.StartGlyphID > rec.EndGlyphID: log.warning( "invalid ClipRecord[%i].StartGlyphID (%i) > " "EndGlyphID (%i); skipped", i, rec.StartGlyphID, rec.EndGlyphID, ) continue redefinedGlyphs = [] missingGlyphs = [] for glyphID in range(rec.StartGlyphID, rec.EndGlyphID + 1): try: glyph = glyphOrder[glyphID] except IndexError: missingGlyphs.append(glyphID) continue if glyph not in clips: clips[glyph] = copy.copy(rec.ClipBox) else: redefinedGlyphs.append(glyphID) if redefinedGlyphs: log.warning( "ClipRecord[%i] overlaps previous records; " "ignoring redefined clip boxes for the " "following glyph ID range: [%i-%i]", i, min(redefinedGlyphs), max(redefinedGlyphs), ) if missingGlyphs: log.warning( "ClipRecord[%i] range references missing " "glyph IDs: [%i-%i]", i, min(missingGlyphs), max(missingGlyphs), ) self.clips = clips def groups(self): glyphsByClip = defaultdict(list) uniqueClips = {} for glyphName, clipBox in self.clips.items(): key = clipBox.as_tuple() glyphsByClip[key].append(glyphName) if key not in uniqueClips: uniqueClips[key] = clipBox return { frozenset(glyphs): uniqueClips[key] for key, glyphs in glyphsByClip.items() } def preWrite(self, font): if not hasattr(self, "clips"): self.clips = {} clipBoxRanges = {} glyphMap = font.getReverseGlyphMap() for glyphs, clipBox in self.groups().items(): glyphIDs = sorted( glyphMap[glyphName] for glyphName in glyphs if glyphName in glyphMap ) if not glyphIDs: continue last = glyphIDs[0] ranges = [[last]] for glyphID in glyphIDs[1:]: if glyphID != last + 1: ranges[-1].append(last) ranges.append([glyphID]) last = glyphID ranges[-1].append(last) for start, end in ranges: assert (start, end) not in clipBoxRanges clipBoxRanges[(start, end)] = clipBox clipRecords = [] for (start, end), clipBox in sorted(clipBoxRanges.items()): record = ClipRecord() record.StartGlyphID = start record.EndGlyphID = end record.ClipBox = clipBox clipRecords.append(record) rawTable = { "ClipCount": len(clipRecords), "ClipRecord": clipRecords, } return rawTable def toXML(self, xmlWriter, font, attrs=None, name=None): tableName = name if name else self.__class__.__name__ if attrs is None: attrs = [] if hasattr(self, "Format"): attrs.append(("Format", self.Format)) xmlWriter.begintag(tableName, attrs) xmlWriter.newline() # sort clips alphabetically to ensure deterministic XML dump for glyphs, clipBox in sorted( self.groups().items(), key=lambda item: min(item[0]) ): 
xmlWriter.begintag("Clip") xmlWriter.newline() for glyphName in sorted(glyphs): xmlWriter.simpletag("Glyph", value=glyphName) xmlWriter.newline() xmlWriter.begintag("ClipBox", [("Format", clipBox.Format)]) xmlWriter.newline() clipBox.toXML2(xmlWriter, font) xmlWriter.endtag("ClipBox") xmlWriter.newline() xmlWriter.endtag("Clip") xmlWriter.newline() xmlWriter.endtag(tableName) xmlWriter.newline() def fromXML(self, name, attrs, content, font): clips = getattr(self, "clips", None) if clips is None: self.clips = clips = {} assert name == "Clip" glyphs = [] clipBox = None for elem in content: if not isinstance(elem, tuple): continue name, attrs, content = elem if name == "Glyph": glyphs.append(attrs["value"]) elif name == "ClipBox": clipBox = ClipBox() clipBox.Format = safeEval(attrs["Format"]) for elem in content: if not isinstance(elem, tuple): continue name, attrs, content = elem clipBox.fromXML(name, attrs, content, font) if clipBox: for glyphName in glyphs: clips[glyphName] = clipBox class ExtendMode(IntEnum): PAD = 0 REPEAT = 1 REFLECT = 2 # Porter-Duff modes for COLRv1 PaintComposite: # https://github.com/googlefonts/colr-gradients-spec/tree/off_sub_1#compositemode-enumeration class CompositeMode(IntEnum): CLEAR = 0 SRC = 1 DEST = 2 SRC_OVER = 3 DEST_OVER = 4 SRC_IN = 5 DEST_IN = 6 SRC_OUT = 7 DEST_OUT = 8 SRC_ATOP = 9 DEST_ATOP = 10 XOR = 11 PLUS = 12 SCREEN = 13 OVERLAY = 14 DARKEN = 15 LIGHTEN = 16 COLOR_DODGE = 17 COLOR_BURN = 18 HARD_LIGHT = 19 SOFT_LIGHT = 20 DIFFERENCE = 21 EXCLUSION = 22 MULTIPLY = 23 HSL_HUE = 24 HSL_SATURATION = 25 HSL_COLOR = 26 HSL_LUMINOSITY = 27 class PaintFormat(IntEnum): PaintColrLayers = 1 PaintSolid = 2 PaintVarSolid = 3 PaintLinearGradient = 4 PaintVarLinearGradient = 5 PaintRadialGradient = 6 PaintVarRadialGradient = 7 PaintSweepGradient = 8 PaintVarSweepGradient = 9 PaintGlyph = 10 PaintColrGlyph = 11 PaintTransform = 12 PaintVarTransform = 13 PaintTranslate = 14 PaintVarTranslate = 15 PaintScale = 16 PaintVarScale = 17 PaintScaleAroundCenter = 18 PaintVarScaleAroundCenter = 19 PaintScaleUniform = 20 PaintVarScaleUniform = 21 PaintScaleUniformAroundCenter = 22 PaintVarScaleUniformAroundCenter = 23 PaintRotate = 24 PaintVarRotate = 25 PaintRotateAroundCenter = 26 PaintVarRotateAroundCenter = 27 PaintSkew = 28 PaintVarSkew = 29 PaintSkewAroundCenter = 30 PaintVarSkewAroundCenter = 31 PaintComposite = 32 def is_variable(self): return self.name.startswith("PaintVar") def as_variable(self): if self.is_variable(): return self try: return PaintFormat.__members__[f"PaintVar{self.name[5:]}"] except KeyError: return None class Paint(getFormatSwitchingBaseTableClass("uint8")): formatEnum = PaintFormat def getFormatName(self): try: return self.formatEnum(self.Format).name except ValueError: raise NotImplementedError(f"Unknown Paint format: {self.Format}") def toXML(self, xmlWriter, font, attrs=None, name=None): tableName = name if name else self.__class__.__name__ if attrs is None: attrs = [] attrs.append(("Format", self.Format)) xmlWriter.begintag(tableName, attrs) xmlWriter.comment(self.getFormatName()) xmlWriter.newline() self.toXML2(xmlWriter, font) xmlWriter.endtag(tableName) xmlWriter.newline() def iterPaintSubTables(self, colr: COLR) -> Iterator[BaseTable.SubTableEntry]: if self.Format == PaintFormat.PaintColrLayers: # https://github.com/fonttools/fonttools/issues/2438: don't die when no LayerList exists layers = [] if colr.LayerList is not None: layers = colr.LayerList.Paint yield from ( BaseTable.SubTableEntry(name="Layers", value=v, index=i) for i, v 
in enumerate( layers[self.FirstLayerIndex : self.FirstLayerIndex + self.NumLayers] ) ) return if self.Format == PaintFormat.PaintColrGlyph: for record in colr.BaseGlyphList.BaseGlyphPaintRecord: if record.BaseGlyph == self.Glyph: yield BaseTable.SubTableEntry(name="BaseGlyph", value=record.Paint) return else: raise KeyError(f"{self.Glyph!r} not in colr.BaseGlyphList") for conv in self.getConverters(): if conv.tableClass is not None and issubclass(conv.tableClass, type(self)): value = getattr(self, conv.name) yield BaseTable.SubTableEntry(name=conv.name, value=value) def getChildren(self, colr) -> List["Paint"]: # this is kept for backward compatibility (e.g. it's used by the subsetter) return [p.value for p in self.iterPaintSubTables(colr)] def traverse(self, colr: COLR, callback): """Depth-first traversal of graph rooted at self, callback on each node.""" if not callable(callback): raise TypeError("callback must be callable") for path in dfs_base_table( self, iter_subtables_fn=lambda paint: paint.iterPaintSubTables(colr) ): paint = path[-1].value callback(paint) def getTransform(self) -> Transform: if self.Format == PaintFormat.PaintTransform: t = self.Transform return Transform(t.xx, t.yx, t.xy, t.yy, t.dx, t.dy) elif self.Format == PaintFormat.PaintTranslate: return Identity.translate(self.dx, self.dy) elif self.Format == PaintFormat.PaintScale: return Identity.scale(self.scaleX, self.scaleY) elif self.Format == PaintFormat.PaintScaleAroundCenter: return ( Identity.translate(self.centerX, self.centerY) .scale(self.scaleX, self.scaleY) .translate(-self.centerX, -self.centerY) ) elif self.Format == PaintFormat.PaintScaleUniform: return Identity.scale(self.scale) elif self.Format == PaintFormat.PaintScaleUniformAroundCenter: return ( Identity.translate(self.centerX, self.centerY) .scale(self.scale) .translate(-self.centerX, -self.centerY) ) elif self.Format == PaintFormat.PaintRotate: return Identity.rotate(radians(self.angle)) elif self.Format == PaintFormat.PaintRotateAroundCenter: return ( Identity.translate(self.centerX, self.centerY) .rotate(radians(self.angle)) .translate(-self.centerX, -self.centerY) ) elif self.Format == PaintFormat.PaintSkew: return Identity.skew(radians(-self.xSkewAngle), radians(self.ySkewAngle)) elif self.Format == PaintFormat.PaintSkewAroundCenter: return ( Identity.translate(self.centerX, self.centerY) .skew(radians(-self.xSkewAngle), radians(self.ySkewAngle)) .translate(-self.centerX, -self.centerY) ) if PaintFormat(self.Format).is_variable(): raise NotImplementedError(f"Variable Paints not supported: {self.Format}") return Identity def computeClipBox( self, colr: COLR, glyphSet: "_TTGlyphSet", quantization: int = 1 ) -> Optional[ClipBox]: pen = ControlBoundsPen(glyphSet) for path in dfs_base_table( self, iter_subtables_fn=lambda paint: paint.iterPaintSubTables(colr) ): paint = path[-1].value if paint.Format == PaintFormat.PaintGlyph: transformation = reduce( Transform.transform, (st.value.getTransform() for st in path), Identity, ) glyphSet[paint.Glyph].draw(TransformPen(pen, transformation)) if pen.bounds is None: return None cb = ClipBox() cb.Format = int(ClipBoxFormat.Static) cb.xMin, cb.yMin, cb.xMax, cb.yMax = quantizeRect(pen.bounds, quantization) return cb # For each subtable format there is a class. However, we don't really distinguish # between "field name" and "format name": often these are the same. Yet there's # a whole bunch of fields with different names. The following dict is a mapping # from "format name" to "field name". 
_buildClasses() uses this to create a # subclass for each alternate field name. # _equivalents = { "MarkArray": ("Mark1Array",), "LangSys": ("DefaultLangSys",), "Coverage": ( "MarkCoverage", "BaseCoverage", "LigatureCoverage", "Mark1Coverage", "Mark2Coverage", "BacktrackCoverage", "InputCoverage", "LookAheadCoverage", "VertGlyphCoverage", "HorizGlyphCoverage", "TopAccentCoverage", "ExtendedShapeCoverage", "MathKernCoverage", ), "ClassDef": ( "ClassDef1", "ClassDef2", "BacktrackClassDef", "InputClassDef", "LookAheadClassDef", "GlyphClassDef", "MarkAttachClassDef", ), "Anchor": ( "EntryAnchor", "ExitAnchor", "BaseAnchor", "LigatureAnchor", "Mark2Anchor", "MarkAnchor", ), "Device": ( "XPlaDevice", "YPlaDevice", "XAdvDevice", "YAdvDevice", "XDeviceTable", "YDeviceTable", "DeviceTable", ), "Axis": ( "HorizAxis", "VertAxis", ), "MinMax": ("DefaultMinMax",), "BaseCoord": ( "MinCoord", "MaxCoord", ), "JstfLangSys": ("DefJstfLangSys",), "JstfGSUBModList": ( "ShrinkageEnableGSUB", "ShrinkageDisableGSUB", "ExtensionEnableGSUB", "ExtensionDisableGSUB", ), "JstfGPOSModList": ( "ShrinkageEnableGPOS", "ShrinkageDisableGPOS", "ExtensionEnableGPOS", "ExtensionDisableGPOS", ), "JstfMax": ( "ShrinkageJstfMax", "ExtensionJstfMax", ), "MathKern": ( "TopRightMathKern", "TopLeftMathKern", "BottomRightMathKern", "BottomLeftMathKern", ), "MathGlyphConstruction": ("VertGlyphConstruction", "HorizGlyphConstruction"), } # # OverFlow logic, to automatically create ExtensionLookups # XXX This should probably move to otBase.py # def fixLookupOverFlows(ttf, overflowRecord): """Either the offset from the LookupList to a lookup overflowed, or an offset from a lookup to a subtable overflowed. The table layout is: GSUB/GPOS Script List Feature List LookupList Lookup[0] and contents SubTable offset list SubTable[0] and contents ... SubTable[n] and contents ... Lookup[n] and contents SubTable offset list SubTable[0] and contents ... SubTable[n] and contents If the offset to a lookup overflowed (SubTableIndex is None) we must promote the *previous* lookup to an Extension type. If the offset from a lookup to a subtable overflowed, then we must promote it to an Extension Lookup type. """ ok = 0 lookupIndex = overflowRecord.LookupListIndex if overflowRecord.SubTableIndex is None: lookupIndex = lookupIndex - 1 if lookupIndex < 0: return ok if overflowRecord.tableType == "GSUB": extType = 7 elif overflowRecord.tableType == "GPOS": extType = 9 lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup lookup = lookups[lookupIndex] # If the previous lookup is an extType, look further back. Very unlikely, but possible. while lookup.SubTable[0].__class__.LookupType == extType: lookupIndex = lookupIndex - 1 if lookupIndex < 0: return ok lookup = lookups[lookupIndex] for lookupIndex in range(lookupIndex, len(lookups)): lookup = lookups[lookupIndex] if lookup.LookupType != extType: lookup.LookupType = extType for si in range(len(lookup.SubTable)): subTable = lookup.SubTable[si] extSubTableClass = lookupTypes[overflowRecord.tableType][extType] extSubTable = extSubTableClass() extSubTable.Format = 1 extSubTable.ExtSubTable = subTable lookup.SubTable[si] = extSubTable ok = 1 return ok def splitMultipleSubst(oldSubTable, newSubTable, overflowRecord): ok = 1 oldMapping = sorted(oldSubTable.mapping.items()) oldLen = len(oldMapping) if overflowRecord.itemName in ["Coverage", "RangeRecord"]: # Coverage table is written last. Overflow is to, or within, # the coverage table. We will just cut the subtable in half.
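# A worked example of the halving below, with made-up numbers: if oldLen == 9,
# newLen becomes 4, so items 4..8 of the sorted mapping move to newSubTable
# while items 0..3 stay in oldSubTable.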
newLen = oldLen // 2 elif overflowRecord.itemName == "Sequence": # We just need to back up by two items from the overflowed # Sequence index to make sure the offset to the Coverage table # doesn't overflow. newLen = overflowRecord.itemIndex - 1 newSubTable.mapping = {} for i in range(newLen, oldLen): item = oldMapping[i] key = item[0] newSubTable.mapping[key] = item[1] del oldSubTable.mapping[key] return ok def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord): ok = 1 if hasattr(oldSubTable, "sortCoverageLast"): newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast oldAlts = sorted(oldSubTable.alternates.items()) oldLen = len(oldAlts) if overflowRecord.itemName in ["Coverage", "RangeRecord"]: # Coverage table is written last. Overflow is to, or within, # the coverage table. We will just cut the subtable in half. newLen = oldLen // 2 elif overflowRecord.itemName == "AlternateSet": # We just need to back up by two items # from the overflowed AlternateSet index to make sure the offset # to the Coverage table doesn't overflow. newLen = overflowRecord.itemIndex - 1 newSubTable.alternates = {} for i in range(newLen, oldLen): item = oldAlts[i] key = item[0] newSubTable.alternates[key] = item[1] del oldSubTable.alternates[key] return ok def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord): ok = 1 oldLigs = sorted(oldSubTable.ligatures.items()) oldLen = len(oldLigs) if overflowRecord.itemName in ["Coverage", "RangeRecord"]: # Coverage table is written last. Overflow is to, or within, # the coverage table. We will just cut the subtable in half. newLen = oldLen // 2 elif overflowRecord.itemName == "LigatureSet": # We just need to back up by two items # from the overflowed LigatureSet index to make sure the offset # to the Coverage table doesn't overflow. newLen = overflowRecord.itemIndex - 1 newSubTable.ligatures = {} for i in range(newLen, oldLen): item = oldLigs[i] key = item[0] newSubTable.ligatures[key] = item[1] del oldSubTable.ligatures[key] return ok def splitPairPos(oldSubTable, newSubTable, overflowRecord): st = oldSubTable ok = False newSubTable.Format = oldSubTable.Format if oldSubTable.Format == 1 and len(oldSubTable.PairSet) > 1: for name in "ValueFormat1", "ValueFormat2": setattr(newSubTable, name, getattr(oldSubTable, name)) # Move top half of coverage to new subtable newSubTable.Coverage = oldSubTable.Coverage.__class__() coverage = oldSubTable.Coverage.glyphs records = oldSubTable.PairSet oldCount = len(oldSubTable.PairSet) // 2 oldSubTable.Coverage.glyphs = coverage[:oldCount] oldSubTable.PairSet = records[:oldCount] newSubTable.Coverage.glyphs = coverage[oldCount:] newSubTable.PairSet = records[oldCount:] oldSubTable.PairSetCount = len(oldSubTable.PairSet) newSubTable.PairSetCount = len(newSubTable.PairSet) ok = True elif oldSubTable.Format == 2 and len(oldSubTable.Class1Record) > 1: if not hasattr(oldSubTable, "Class2Count"): oldSubTable.Class2Count = len(oldSubTable.Class1Record[0].Class2Record) for name in "Class2Count", "ClassDef2", "ValueFormat1", "ValueFormat2": setattr(newSubTable, name, getattr(oldSubTable, name)) # The two subtables will still have the same ClassDef2 and the table # sharing will still cause the sharing to overflow. As such, disable # sharing on the one that is serialized second (that's oldSubTable).
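# A worked example of the class split below, with a made-up ClassDef: given
# oldCount == 2 and classDefs == {"a": 1, "b": 2, "c": 3}, oldSubTable keeps
# {"a": 1}, newSubTable stores {"c": 1}, and "b" needs no entry in the new
# subtable because its shifted class (2 - oldCount == 0) is implicit in a
# ClassDef.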
oldSubTable.DontShare = True # Move top half of class numbers to new subtable newSubTable.Coverage = oldSubTable.Coverage.__class__() newSubTable.ClassDef1 = oldSubTable.ClassDef1.__class__() coverage = oldSubTable.Coverage.glyphs classDefs = oldSubTable.ClassDef1.classDefs records = oldSubTable.Class1Record oldCount = len(oldSubTable.Class1Record) // 2 newGlyphs = set(k for k, v in classDefs.items() if v >= oldCount) oldSubTable.Coverage.glyphs = [g for g in coverage if g not in newGlyphs] oldSubTable.ClassDef1.classDefs = { k: v for k, v in classDefs.items() if v < oldCount } oldSubTable.Class1Record = records[:oldCount] newSubTable.Coverage.glyphs = [g for g in coverage if g in newGlyphs] newSubTable.ClassDef1.classDefs = { k: (v - oldCount) for k, v in classDefs.items() if v > oldCount } newSubTable.Class1Record = records[oldCount:] oldSubTable.Class1Count = len(oldSubTable.Class1Record) newSubTable.Class1Count = len(newSubTable.Class1Record) ok = True return ok def splitMarkBasePos(oldSubTable, newSubTable, overflowRecord): # split half of the mark classes to the new subtable classCount = oldSubTable.ClassCount if classCount < 2: # oh well, not much left to split... return False oldClassCount = classCount // 2 newClassCount = classCount - oldClassCount oldMarkCoverage, oldMarkRecords = [], [] newMarkCoverage, newMarkRecords = [], [] for glyphName, markRecord in zip( oldSubTable.MarkCoverage.glyphs, oldSubTable.MarkArray.MarkRecord ): if markRecord.Class < oldClassCount: oldMarkCoverage.append(glyphName) oldMarkRecords.append(markRecord) else: markRecord.Class -= oldClassCount newMarkCoverage.append(glyphName) newMarkRecords.append(markRecord) oldBaseRecords, newBaseRecords = [], [] for rec in oldSubTable.BaseArray.BaseRecord: oldBaseRecord, newBaseRecord = rec.__class__(), rec.__class__() oldBaseRecord.BaseAnchor = rec.BaseAnchor[:oldClassCount] newBaseRecord.BaseAnchor = rec.BaseAnchor[oldClassCount:] oldBaseRecords.append(oldBaseRecord) newBaseRecords.append(newBaseRecord) newSubTable.Format = oldSubTable.Format oldSubTable.MarkCoverage.glyphs = oldMarkCoverage newSubTable.MarkCoverage = oldSubTable.MarkCoverage.__class__() newSubTable.MarkCoverage.glyphs = newMarkCoverage # share the same BaseCoverage in both halves newSubTable.BaseCoverage = oldSubTable.BaseCoverage oldSubTable.ClassCount = oldClassCount newSubTable.ClassCount = newClassCount oldSubTable.MarkArray.MarkRecord = oldMarkRecords newSubTable.MarkArray = oldSubTable.MarkArray.__class__() newSubTable.MarkArray.MarkRecord = newMarkRecords oldSubTable.MarkArray.MarkCount = len(oldMarkRecords) newSubTable.MarkArray.MarkCount = len(newMarkRecords) oldSubTable.BaseArray.BaseRecord = oldBaseRecords newSubTable.BaseArray = oldSubTable.BaseArray.__class__() newSubTable.BaseArray.BaseRecord = newBaseRecords oldSubTable.BaseArray.BaseCount = len(oldBaseRecords) newSubTable.BaseArray.BaseCount = len(newBaseRecords) return True splitTable = { "GSUB": { # 1: splitSingleSubst, 2: splitMultipleSubst, 3: splitAlternateSubst, 4: splitLigatureSubst, # 5: splitContextSubst, # 6: splitChainContextSubst, # 7: splitExtensionSubst, # 8: splitReverseChainSingleSubst, }, "GPOS": { # 1: splitSinglePos, 2: splitPairPos, # 3: splitCursivePos, 4: splitMarkBasePos, # 5: splitMarkLigPos, # 6: splitMarkMarkPos, # 7: splitContextPos, # 8: splitChainContextPos, # 9: splitExtensionPos, }, } def fixSubTableOverFlows(ttf, overflowRecord): """ An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts. 
""" table = ttf[overflowRecord.tableType].table lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex] subIndex = overflowRecord.SubTableIndex subtable = lookup.SubTable[subIndex] # First, try not sharing anything for this subtable... if not hasattr(subtable, "DontShare"): subtable.DontShare = True return True if hasattr(subtable, "ExtSubTable"): # We split the subtable of the Extension table, and add a new Extension table # to contain the new subtable. subTableType = subtable.ExtSubTable.__class__.LookupType extSubTable = subtable subtable = extSubTable.ExtSubTable newExtSubTableClass = lookupTypes[overflowRecord.tableType][ extSubTable.__class__.LookupType ] newExtSubTable = newExtSubTableClass() newExtSubTable.Format = extSubTable.Format toInsert = newExtSubTable newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] newSubTable = newSubTableClass() newExtSubTable.ExtSubTable = newSubTable else: subTableType = subtable.__class__.LookupType newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] newSubTable = newSubTableClass() toInsert = newSubTable if hasattr(lookup, "SubTableCount"): # may not be defined yet. lookup.SubTableCount = lookup.SubTableCount + 1 try: splitFunc = splitTable[overflowRecord.tableType][subTableType] except KeyError: log.error( "Don't know how to split %s lookup type %s", overflowRecord.tableType, subTableType, ) return False ok = splitFunc(subtable, newSubTable, overflowRecord) if ok: lookup.SubTable.insert(subIndex + 1, toInsert) return ok # End of OverFlow logic def _buildClasses(): import re from .otData import otData formatPat = re.compile(r"([A-Za-z0-9]+)Format(\d+)$") namespace = globals() # populate module with classes for name, table in otData: baseClass = BaseTable m = formatPat.match(name) if m: # XxxFormatN subtable, we only add the "base" table name = m.group(1) # the first row of a format-switching otData table describes the Format; # the first column defines the type of the Format field. # Currently this can be either 'uint16' or 'uint8'. formatType = table[0][0] baseClass = getFormatSwitchingBaseTableClass(formatType) if name not in namespace: # the class doesn't exist yet, so the base implementation is used. cls = type(name, (baseClass,), {}) if name in ("GSUB", "GPOS"): cls.DontShare = True namespace[name] = cls # link Var{Table} <-> {Table} (e.g. ColorStop <-> VarColorStop, etc.) 
for name, _ in otData: if name.startswith("Var") and len(name) > 3 and name[3:] in namespace: varType = namespace[name] noVarType = namespace[name[3:]] varType.NoVarType = noVarType noVarType.VarType = varType for base, alts in _equivalents.items(): base = namespace[base] for alt in alts: namespace[alt] = base global lookupTypes lookupTypes = { "GSUB": { 1: SingleSubst, 2: MultipleSubst, 3: AlternateSubst, 4: LigatureSubst, 5: ContextSubst, 6: ChainContextSubst, 7: ExtensionSubst, 8: ReverseChainSingleSubst, }, "GPOS": { 1: SinglePos, 2: PairPos, 3: CursivePos, 4: MarkBasePos, 5: MarkLigPos, 6: MarkMarkPos, 7: ContextPos, 8: ChainContextPos, 9: ExtensionPos, }, "mort": { 4: NoncontextualMorph, }, "morx": { 0: RearrangementMorph, 1: ContextualMorph, 2: LigatureMorph, # 3: Reserved, 4: NoncontextualMorph, 5: InsertionMorph, }, } lookupTypes["JSTF"] = lookupTypes["GPOS"] # JSTF contains GPOS for lookupEnum in lookupTypes.values(): for enum, cls in lookupEnum.items(): cls.LookupType = enum global featureParamTypes featureParamTypes = { "size": FeatureParamsSize, } for i in range(1, 20 + 1): featureParamTypes["ss%02d" % i] = FeatureParamsStylisticSet for i in range(1, 99 + 1): featureParamTypes["cv%02d" % i] = FeatureParamsCharacterVariants # add converters to classes from .otConverters import buildConverters for name, table in otData: m = formatPat.match(name) if m: # XxxFormatN subtable, add converter to "base" table name, format = m.groups() format = int(format) cls = namespace[name] if not hasattr(cls, "converters"): cls.converters = {} cls.convertersByName = {} converters, convertersByName = buildConverters(table[1:], namespace) cls.converters[format] = converters cls.convertersByName[format] = convertersByName # XXX Add staticSize? else: cls = namespace[name] cls.converters, cls.convertersByName = buildConverters(table, namespace) # XXX Add staticSize? _buildClasses() def _getGlyphsFromCoverageTable(coverage): if coverage is None: # empty coverage table return [] else: return coverage.glyphs PKaZZZ�9��yy$fontTools/ttLib/tables/otTraverse.py"""Methods for traversing trees of otData-driven OpenType tables.""" from collections import deque from typing import Callable, Deque, Iterable, List, Optional, Tuple from .otBase import BaseTable __all__ = [ "bfs_base_table", "dfs_base_table", "SubTablePath", ] class SubTablePath(Tuple[BaseTable.SubTableEntry, ...]): def __str__(self) -> str: path_parts = [] for entry in self: path_part = entry.name if entry.index is not None: path_part += f"[{entry.index}]" path_parts.append(path_part) return ".".join(path_parts) # Given f(current frontier, new entries) add new entries to frontier AddToFrontierFn = Callable[[Deque[SubTablePath], List[SubTablePath]], None] def dfs_base_table( root: BaseTable, root_accessor: Optional[str] = None, skip_root: bool = False, predicate: Optional[Callable[[SubTablePath], bool]] = None, iter_subtables_fn: Optional[ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]] ] = None, ) -> Iterable[SubTablePath]: """Depth-first search tree of BaseTables. Args: root (BaseTable): the root of the tree. root_accessor (Optional[str]): attribute name for the root table, if any (mostly useful for debugging). skip_root (Optional[bool]): if True, the root itself is not visited, only its children. predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out paths. If True, the path is yielded and its subtables are added to the queue. If False, the path is skipped and its subtables are not traversed. 
iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]): function to iterate over subtables of a table. If None, the default BaseTable.iterSubTables() is used. Yields: SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples for each of the nodes in the tree. The last entry in a path is the current subtable, whereas preceding ones refer to its parent tables all the way up to the root. """ yield from _traverse_ot_data( root, root_accessor, skip_root, predicate, lambda frontier, new: frontier.extendleft(reversed(new)), iter_subtables_fn, ) def bfs_base_table( root: BaseTable, root_accessor: Optional[str] = None, skip_root: bool = False, predicate: Optional[Callable[[SubTablePath], bool]] = None, iter_subtables_fn: Optional[ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]] ] = None, ) -> Iterable[SubTablePath]: """Breadth-first search tree of BaseTables. Args: root (BaseTable): the root of the tree. root_accessor (Optional[str]): attribute name for the root table, if any (mostly useful for debugging). skip_root (Optional[bool]): if True, the root itself is not visited, only its children. predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out paths. If True, the path is yielded and its subtables are added to the queue. If False, the path is skipped and its subtables are not traversed. iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]): function to iterate over subtables of a table. If None, the default BaseTable.iterSubTables() is used. Yields: SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples for each of the nodes in the tree. The last entry in a path is the current subtable, whereas preceding ones refer to its parent tables all the way up to the root. """ yield from _traverse_ot_data( root, root_accessor, skip_root, predicate, lambda frontier, new: frontier.extend(new), iter_subtables_fn, ) def _traverse_ot_data( root: BaseTable, root_accessor: Optional[str], skip_root: bool, predicate: Optional[Callable[[SubTablePath], bool]], add_to_frontier_fn: AddToFrontierFn, iter_subtables_fn: Optional[ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]] ] = None, ) -> Iterable[SubTablePath]: # no visited because general otData cannot cycle (forward-offset only) if root_accessor is None: root_accessor = type(root).__name__ if predicate is None: def predicate(path): return True if iter_subtables_fn is None: def iter_subtables_fn(table): return table.iterSubTables() frontier: Deque[SubTablePath] = deque() root_entry = BaseTable.SubTableEntry(root_accessor, root) if not skip_root: frontier.append((root_entry,)) else: add_to_frontier_fn( frontier, [ (root_entry, subtable_entry) for subtable_entry in iter_subtables_fn(root) ], ) while frontier: # path is (value, attr_name) tuples. attr_name is attr of parent to get value path = frontier.popleft() current = path[-1].value if not predicate(path): continue yield SubTablePath(path) new_entries = [ path + (subtable_entry,) for subtable_entry in iter_subtables_fn(current) ] add_to_frontier_fn(frontier, new_entries) PKaZZZ� 9���#fontTools/ttLib/tables/sbixGlyph.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import readHex, safeEval import struct sbixGlyphHeaderFormat = """ > originOffsetX: h # The x-value of the point in the glyph relative to its # lower-left corner which corresponds to the origin of # the glyph on the screen, that is the point on the # baseline at the left edge of the glyph.
originOffsetY: h # The y-value of the point in the glyph relative to its # lower-left corner which corresponds to the origin of # the glyph on the screen, that is the point on the # baseline at the left edge of the glyph. graphicType: 4s # e.g. "png " """ sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat) class Glyph(object): def __init__( self, glyphName=None, referenceGlyphName=None, originOffsetX=0, originOffsetY=0, graphicType=None, imageData=None, rawdata=None, gid=0, ): self.gid = gid self.glyphName = glyphName self.referenceGlyphName = referenceGlyphName self.originOffsetX = originOffsetX self.originOffsetY = originOffsetY self.rawdata = rawdata self.graphicType = graphicType self.imageData = imageData # fix self.graphicType if it is null terminated or too short if self.graphicType is not None: if self.graphicType[-1] == "\0": self.graphicType = self.graphicType[:-1] if len(self.graphicType) > 4: from fontTools import ttLib raise ttLib.TTLibError( "Glyph.graphicType must not be longer than 4 characters." ) elif len(self.graphicType) < 4: # pad with spaces self.graphicType += " "[: (4 - len(self.graphicType))] def is_reference_type(self): """Returns True if this glyph is a reference to another glyph's image data.""" return self.graphicType == "dupe" or self.graphicType == "flip" def decompile(self, ttFont): self.glyphName = ttFont.getGlyphName(self.gid) if self.rawdata is None: from fontTools import ttLib raise ttLib.TTLibError("No table data to decompile") if len(self.rawdata) > 0: if len(self.rawdata) < sbixGlyphHeaderFormatSize: from fontTools import ttLib # print "Glyph %i header too short: Expected %x, got %x." % (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata)) raise ttLib.TTLibError("Glyph header too short.") sstruct.unpack( sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self ) if self.is_reference_type(): # this glyph is a reference to another glyph's image data (gid,) = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:]) self.referenceGlyphName = ttFont.getGlyphName(gid) else: self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:] self.referenceGlyphName = None # clean up del self.rawdata del self.gid def compile(self, ttFont): if self.glyphName is None: from fontTools import ttLib raise ttLib.TTLibError("Can't compile Glyph without glyph name") # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index? # (needed if you just want to compile the sbix table on its own) self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName)) if self.graphicType is None: rawdata = b"" else: rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) if self.is_reference_type(): rawdata += struct.pack(">H", ttFont.getGlyphID(self.referenceGlyphName)) else: assert self.imageData is not None rawdata += self.imageData self.rawdata = rawdata def toXML(self, xmlWriter, ttFont): if self.graphicType is None: # TODO: ignore empty glyphs? # a glyph data entry is required for each glyph, # but empty ones can be calculated at compile time xmlWriter.simpletag("glyph", name=self.glyphName) xmlWriter.newline() return xmlWriter.begintag( "glyph", graphicType=self.graphicType, name=self.glyphName, originOffsetX=self.originOffsetX, originOffsetY=self.originOffsetY, ) xmlWriter.newline() if self.is_reference_type(): # this glyph is a reference to another glyph id. 
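# An illustrative sketch only (the glyph names are hypothetical): a
# reference glyph ("dupe" or "flip" graphicType) serializes to TTX
# roughly as
#   <glyph graphicType="dupe" name="A.alt" originOffsetX="0" originOffsetY="0">
#     <ref glyphname="A"/>
#   </glyph>
# whereas a regular image glyph carries its raw bytes in a <hexdata>
# element instead of <ref>.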
xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName) else: xmlWriter.begintag("hexdata") xmlWriter.newline() xmlWriter.dumphex(self.imageData) xmlWriter.endtag("hexdata") xmlWriter.newline() xmlWriter.endtag("glyph") xmlWriter.newline() def fromXML(self, name, attrs, content, ttFont): if name == "ref": # this glyph is a reference to another glyph's image data. # in this case imageData contains the glyph id of the reference glyph # get glyph id from glyphname glyphname = safeEval("'''" + attrs["glyphname"] + "'''") self.imageData = struct.pack(">H", ttFont.getGlyphID(glyphname)) self.referenceGlyphName = glyphname elif name == "hexdata": self.imageData = readHex(content) else: from fontTools import ttLib raise ttLib.TTLibError("can't handle '%s' element" % name) PKaZZZe�K�$fontTools/ttLib/tables/sbixStrike.pyfrom fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from .sbixGlyph import Glyph import struct sbixStrikeHeaderFormat = """ > ppem: H # The PPEM for which this strike was designed (e.g., 9, # 12, 24) resolution: H # The screen resolution (in dpi) for which this strike # was designed (e.g., 72) """ sbixGlyphDataOffsetFormat = """ > glyphDataOffset: L # Offset from the beginning of the strike data record # to data for the individual glyph """ sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat) sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat) class Strike(object): def __init__(self, rawdata=None, ppem=0, resolution=72): self.data = rawdata self.ppem = ppem self.resolution = resolution self.glyphs = {} def decompile(self, ttFont): if self.data is None: from fontTools import ttLib raise ttLib.TTLibError("No table data to decompile") if len(self.data) < sbixStrikeHeaderFormatSize: from fontTools import ttLib raise ttLib.TTLibError( "Strike header too short: Expected %x, got %x." % (sbixStrikeHeaderFormatSize, len(self.data)) ) # read Strike header from raw data sstruct.unpack( sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self ) # calculate number of glyphs (firstGlyphDataOffset,) = struct.unpack( ">L", self.data[ sbixStrikeHeaderFormatSize : sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize ], ) self.numGlyphs = ( firstGlyphDataOffset - sbixStrikeHeaderFormatSize ) // sbixGlyphDataOffsetFormatSize - 1 # ^ -1 because there's one more offset than glyphs # build offset list for single glyph data offsets self.glyphDataOffsets = [] for i in range( self.numGlyphs + 1 ): # + 1 because there's one more offset than glyphs start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize (current_offset,) = struct.unpack( ">L", self.data[start : start + sbixGlyphDataOffsetFormatSize] ) self.glyphDataOffsets.append(current_offset) # iterate through offset list and slice raw data into glyph data records for i in range(self.numGlyphs): current_glyph = Glyph( rawdata=self.data[ self.glyphDataOffsets[i] : self.glyphDataOffsets[i + 1] ], gid=i, ) current_glyph.decompile(ttFont) self.glyphs[current_glyph.glyphName] = current_glyph del self.glyphDataOffsets del self.numGlyphs del self.data def compile(self, ttFont): self.glyphDataOffsets = b"" self.bitmapData = b"" glyphOrder = ttFont.getGlyphOrder() # first glyph starts right after the header currentGlyphDataOffset = ( sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1) ) for glyphName in glyphOrder: if glyphName in self.glyphs: # we have glyph data for this glyph current_glyph = self.glyphs[glyphName] else: # must add empty glyph data
record for this glyph current_glyph = Glyph(glyphName=glyphName) current_glyph.compile(ttFont) current_glyph.glyphDataOffset = currentGlyphDataOffset self.bitmapData += current_glyph.rawdata currentGlyphDataOffset += len(current_glyph.rawdata) self.glyphDataOffsets += sstruct.pack( sbixGlyphDataOffsetFormat, current_glyph ) # add last "offset", really the end address of the last glyph data record dummy = Glyph() dummy.glyphDataOffset = currentGlyphDataOffset self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy) # pack header self.data = sstruct.pack(sbixStrikeHeaderFormat, self) # add offsets and image data after header self.data += self.glyphDataOffsets + self.bitmapData def toXML(self, xmlWriter, ttFont): xmlWriter.begintag("strike") xmlWriter.newline() xmlWriter.simpletag("ppem", value=self.ppem) xmlWriter.newline() xmlWriter.simpletag("resolution", value=self.resolution) xmlWriter.newline() glyphOrder = ttFont.getGlyphOrder() for i in range(len(glyphOrder)): if glyphOrder[i] in self.glyphs: self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont) # TODO: what if there are more glyph data records than (glyf table) glyphs? xmlWriter.endtag("strike") xmlWriter.newline() def fromXML(self, name, attrs, content, ttFont): if name in ["ppem", "resolution"]: setattr(self, name, safeEval(attrs["value"])) elif name == "glyph": if "graphicType" in attrs: myFormat = safeEval("'''" + attrs["graphicType"] + "'''") else: myFormat = None if "glyphname" in attrs: myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''") elif "name" in attrs: myGlyphName = safeEval("'''" + attrs["name"] + "'''") else: from fontTools import ttLib raise ttLib.TTLibError("Glyph must have a glyph name.") if "originOffsetX" in attrs: myOffsetX = safeEval(attrs["originOffsetX"]) else: myOffsetX = 0 if "originOffsetY" in attrs: myOffsetY = safeEval(attrs["originOffsetY"]) else: myOffsetY = 0 current_glyph = Glyph( glyphName=myGlyphName, graphicType=myFormat, originOffsetX=myOffsetX, originOffsetY=myOffsetY, ) for element in content: if isinstance(element, tuple): name, attrs, content = element current_glyph.fromXML(name, attrs, content, ttFont) current_glyph.compile(ttFont) self.glyphs[current_glyph.glyphName] = current_glyph else: from fontTools import ttLib raise ttLib.TTLibError("can't handle '%s' element" % name) PKaZZZLL6� � +fontTools/ttLib/tables/table_API_readme.txtThis folder is a subpackage of ttLib. Each module here is a specialized TT/OT table converter: they can convert raw data to Python objects and vice versa. Usually you don't need to use the modules directly: they are imported and used automatically when needed by ttLib. If you are writing you own table converter the following is important. The modules here have pretty strange names: this is due to the fact that we need to map TT table tags (which are case sensitive) to filenames (which on Mac and Win aren't case sensitive) as well as to Python identifiers. The latter means it can only contain [A-Za-z0-9_] and cannot start with a number. ttLib provides functions to expand a tag into the format used here: >>> from fontTools import ttLib >>> ttLib.tagToIdentifier("FOO ") 'F_O_O_' >>> ttLib.tagToIdentifier("cvt ") '_c_v_t' >>> ttLib.tagToIdentifier("OS/2") 'O_S_2f_2' >>> ttLib.tagToIdentifier("glyf") '_g_l_y_f' >>> And vice versa: >>> ttLib.identifierToTag("F_O_O_") 'FOO ' >>> ttLib.identifierToTag("_c_v_t") 'cvt ' >>> ttLib.identifierToTag("O_S_2f_2") 'OS/2' >>> ttLib.identifierToTag("_g_l_y_f") 'glyf' >>> Eg. 
the 'glyf' table converter lives in a Python file called: _g_l_y_f.py The converter itself is a class, named "table_" + expandedtag. Eg: class table__g_l_y_f: etc. Note that if you _do_ need to use such modules or classes manually, there are two convenient API functions that let you find them by tag: >>> ttLib.getTableModule('glyf') <module 'ttLib.tables._g_l_y_f'> >>> ttLib.getTableClass('glyf') <class ttLib.tables._g_l_y_f.table__g_l_y_f at 645f400> >>> You must subclass from DefaultTable.DefaultTable. It provides some default behavior, as well as a constructor method (__init__) that you don't need to override. Your converter should minimally provide two methods: class table_F_O_O_(DefaultTable.DefaultTable): # converter for table 'FOO ' def decompile(self, data, ttFont): # 'data' is the raw table data. Unpack it into a # Python data structure. # 'ttFont' is a ttLib.TTfile instance, enabling you to # refer to other tables. Do ***not*** keep a reference to # it: it will cause a circular reference (ttFont saves # a reference to us), and that means we'll be leaking # memory. If you need to use it in other methods, just # pass it around as a method argument. def compile(self, ttFont): # Return the raw data, as converted from the Python # data structure. # Again, 'ttFont' is there so you can access other tables. # Same warning applies. If you want to support TTX import/export as well, you need to provide two additional methods: def toXML(self, writer, ttFont): # XXX def fromXML(self, (name, attrs, content), ttFont): # XXX PKaZZZ�I�0�0�#fontTools/ttLib/tables/ttProgram.py"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs.""" from __future__ import annotations from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin import array from io import StringIO from typing import List import re import logging log = logging.getLogger(__name__) # fmt: off # first, the list of instructions that eat bytes or words from the instruction stream streamInstructions = [ # # opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes # (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn ] # next, the list of "normal" instructions instructions = [ # # opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes # (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p - (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n| (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2) (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 - (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... 
, ploopvalue - (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f - (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n) (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack - (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n - (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2 (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e (0x59, 'EIF', 0, 'EndIf', 0, 0), # - - (0x1b, 'ELSE', 0, 'Else', 0, 0), # - - (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - - (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b (0x57, 'EVEN', 0, 'Even', 1, 1), # e b (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f - (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - - (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - - (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue - (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l - (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l - (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n) (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result (0x91, 'GETVARIATION', 0, 'GetVariation', 0, -1), # - a1,..,an (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b (0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f - (0x58, 'IF', 0, 'If', 1, 0), # e - (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v - (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... 
, ploopvalue - (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p - (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - - (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset - (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset - (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset - (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count - (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2) (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p - (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p - (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p - (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2) (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p - (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p - (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64 (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e ) (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2 (0x56, 'ODD', 0, 'Odd', 1, 1), # e b (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e - (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - - (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - - (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2 (0x43, 'RS', 0, 'ReadStore', 1, 1), # n v (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - - (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - - (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - - (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - - (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n - (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight - (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n - (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n - (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p - (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n - (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n - (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 - (0x5f, 'SDS', 0, 'SetDeltaShiftInGState', 1, 0), # n - (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x - (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - - (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 - (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - - (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c - (0x32, 'SHP', 1, 'ShiftPointByLastPoint', -1, 0), # p1, p2, ..., ploopvalue - (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue - (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e - (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n - (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance - (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x - (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - - (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 - (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n - (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p - (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p - (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p - (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n - (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n - (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2) (0x00, 'SVTCA', 1, 
'SetFPVectorToAxis', 0, 0), # - - (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2 (0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n - (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n - (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n - (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n - (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p - (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l - (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l - (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l - ] # fmt: on def bitRepr(value, bits): s = "" for i in range(bits): s = "01"[value & 0x1] + s value = value >> 1 return s _mnemonicPat = re.compile(r"[A-Z][A-Z0-9]*$") def _makeDict(instructionList): opcodeDict = {} mnemonicDict = {} for op, mnemonic, argBits, name, pops, pushes in instructionList: assert _mnemonicPat.match(mnemonic) mnemonicDict[mnemonic] = op, argBits, name if argBits: argoffset = op for i in range(1 << argBits): opcodeDict[op + i] = mnemonic, argBits, argoffset, name else: opcodeDict[op] = mnemonic, 0, 0, name return opcodeDict, mnemonicDict streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions) opcodeDict, mnemonicDict = _makeDict(instructions) class tt_instructions_error(Exception): def __init__(self, error): self.error = error def __str__(self): return "TT instructions error: %s" % repr(self.error) _comment = r"/\*.*?\*/" _instruction = r"([A-Z][A-Z0-9]*)\s*\[(.*?)\]" _number = r"-?[0-9]+" _token = "(%s)|(%s)|(%s)" % (_instruction, _number, _comment) _tokenRE = re.compile(_token) _whiteRE = re.compile(r"\s*") _pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/") _indentRE = re.compile(r"^FDEF|IF|ELSE\[ \]\t.+") _unindentRE = re.compile(r"^ELSE|ENDF|EIF\[ \]\t.+") def _skipWhite(data, pos): m = _whiteRE.match(data, pos) newPos = m.regs[0][1] assert newPos >= pos return newPos class Program(object): def __init__(self) -> None: pass def fromBytecode(self, bytecode: bytes) -> None: self.bytecode = array.array("B", bytecode) if hasattr(self, "assembly"): del self.assembly def fromAssembly(self, assembly: List[str] | str) -> None: if isinstance(assembly, list): self.assembly = assembly elif isinstance(assembly, str): self.assembly = assembly.splitlines() else: raise TypeError(f"expected str or List[str], got {type(assembly).__name__}") if hasattr(self, "bytecode"): del self.bytecode def getBytecode(self) -> bytes: if not hasattr(self, "bytecode"): self._assemble() return self.bytecode.tobytes() def getAssembly(self, preserve=True) -> List[str]: if not hasattr(self, "assembly"): self._disassemble(preserve=preserve) return self.assembly def toXML(self, writer, ttFont) -> None: if ( not hasattr(ttFont, "disassembleInstructions") or ttFont.disassembleInstructions ): try: assembly = self.getAssembly() except: import traceback tmp = StringIO() traceback.print_exc(file=tmp) msg = "An exception occurred during the decompilation of glyph program:\n\n" msg += tmp.getvalue() log.error(msg) writer.begintag("bytecode") writer.newline() writer.comment(msg.strip()) writer.newline() writer.dumphex(self.getBytecode()) writer.endtag("bytecode") writer.newline() else: if not assembly: return writer.begintag("assembly") writer.newline() i = 0 indent = 0 nInstr = len(assembly) while i < nInstr: instr = assembly[i] if _unindentRE.match(instr): indent -= 1 writer.write(writer.indentwhite * indent) writer.write(instr) writer.newline() m = _pushCountPat.match(instr) i = i + 1 if m: nValues = int(m.group(1)) line: List[str] = [] j = 0 for j in range(nValues): if j and not (j % 25): 
writer.write(writer.indentwhite * indent) writer.write(" ".join(line)) writer.newline() line = [] line.append(assembly[i + j]) writer.write(writer.indentwhite * indent) writer.write(" ".join(line)) writer.newline() i = i + j + 1 if _indentRE.match(instr): indent += 1 writer.endtag("assembly") writer.newline() else: bytecode = self.getBytecode() if not bytecode: return writer.begintag("bytecode") writer.newline() writer.dumphex(bytecode) writer.endtag("bytecode") writer.newline() def fromXML(self, name, attrs, content, ttFont) -> None: if name == "assembly": self.fromAssembly(strjoin(content)) self._assemble() del self.assembly else: assert name == "bytecode" self.fromBytecode(readHex(content)) def _assemble(self) -> None: assembly = " ".join(getattr(self, "assembly", [])) bytecode: List[int] = [] push = bytecode.append lenAssembly = len(assembly) pos = _skipWhite(assembly, 0) while pos < lenAssembly: m = _tokenRE.match(assembly, pos) if m is None: raise tt_instructions_error( "Syntax error in TT program (%s)" % assembly[pos - 5 : pos + 15] ) dummy, mnemonic, arg, number, comment = m.groups() pos = m.regs[0][1] if comment: pos = _skipWhite(assembly, pos) continue arg = arg.strip() if mnemonic.startswith("INSTR"): # Unknown instruction op = int(mnemonic[5:]) push(op) elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"): op, argBits, name = mnemonicDict[mnemonic] if len(arg) != argBits: raise tt_instructions_error( "Incorrect number of argument bits (%s[%s])" % (mnemonic, arg) ) if arg: arg = binary2num(arg) push(op + arg) else: push(op) else: args = [] pos = _skipWhite(assembly, pos) while pos < lenAssembly: m = _tokenRE.match(assembly, pos) if m is None: raise tt_instructions_error( "Syntax error in TT program (%s)" % assembly[pos : pos + 15] ) dummy, _mnemonic, arg, number, comment = m.groups() if number is None and comment is None: break pos = m.regs[0][1] pos = _skipWhite(assembly, pos) if comment is not None: continue args.append(int(number)) nArgs = len(args) if mnemonic == "PUSH": # Automatically choose the most compact representation nWords = 0 while nArgs: while ( nWords < nArgs and nWords < 255 and not (0 <= args[nWords] <= 255) ): nWords += 1 nBytes = 0 while ( nWords + nBytes < nArgs and nBytes < 255 and 0 <= args[nWords + nBytes] <= 255 ): nBytes += 1 if ( nBytes < 2 and nWords + nBytes < 255 and nWords + nBytes != nArgs ): # Will write bytes as words nWords += nBytes continue # Write words if nWords: if nWords <= 8: op, argBits, name = streamMnemonicDict["PUSHW"] op = op + nWords - 1 push(op) else: op, argBits, name = streamMnemonicDict["NPUSHW"] push(op) push(nWords) for value in args[:nWords]: assert -32768 <= value < 32768, ( "PUSH value out of range %d" % value ) push((value >> 8) & 0xFF) push(value & 0xFF) # Write bytes if nBytes: pass if nBytes <= 8: op, argBits, name = streamMnemonicDict["PUSHB"] op = op + nBytes - 1 push(op) else: op, argBits, name = streamMnemonicDict["NPUSHB"] push(op) push(nBytes) for value in args[nWords : nWords + nBytes]: push(value) nTotal = nWords + nBytes args = args[nTotal:] nArgs -= nTotal nWords = 0 else: # Write exactly what we've been asked to words = mnemonic[-1] == "W" op, argBits, name = streamMnemonicDict[mnemonic] if mnemonic[0] != "N": assert nArgs <= 8, nArgs op = op + nArgs - 1 push(op) else: assert nArgs < 256 push(op) push(nArgs) if words: for value in args: assert -32768 <= value < 32768, ( "PUSHW value out of range %d" % value ) push((value >> 8) & 0xFF) push(value & 0xFF) else: for value in args: assert 0 <= 
value < 256, ( "PUSHB value out of range %d" % value ) push(value) pos = _skipWhite(assembly, pos) if bytecode: assert max(bytecode) < 256 and min(bytecode) >= 0 self.bytecode = array.array("B", bytecode) def _disassemble(self, preserve=False) -> None: assembly = [] i = 0 bytecode = getattr(self, "bytecode", []) numBytecode = len(bytecode) while i < numBytecode: op = bytecode[i] try: mnemonic, argBits, argoffset, name = opcodeDict[op] except KeyError: if op in streamOpcodeDict: values = [] # Merge consecutive PUSH operations while bytecode[i] in streamOpcodeDict: op = bytecode[i] mnemonic, argBits, argoffset, name = streamOpcodeDict[op] words = mnemonic[-1] == "W" if argBits: nValues = op - argoffset + 1 else: i = i + 1 nValues = bytecode[i] i = i + 1 assert nValues > 0 if not words: for j in range(nValues): value = bytecode[i] values.append(repr(value)) i = i + 1 else: for j in range(nValues): # cast to signed int16 value = (bytecode[i] << 8) | bytecode[i + 1] if value >= 0x8000: value = value - 0x10000 values.append(repr(value)) i = i + 2 if preserve: break if not preserve: mnemonic = "PUSH" nValues = len(values) if nValues == 1: assembly.append("%s[ ] /* 1 value pushed */" % mnemonic) else: assembly.append( "%s[ ] /* %s values pushed */" % (mnemonic, nValues) ) assembly.extend(values) else: assembly.append("INSTR%d[ ]" % op) i = i + 1 else: if argBits: assembly.append( mnemonic + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name) ) else: assembly.append(mnemonic + "[ ] /* %s */" % name) i = i + 1 self.assembly = assembly def __bool__(self) -> bool: """ >>> p = Program() >>> bool(p) False >>> bc = array.array("B", [0]) >>> p.fromBytecode(bc) >>> bool(p) True >>> p.bytecode.pop() 0 >>> bool(p) False >>> p = Program() >>> asm = ['SVTCA[0]'] >>> p.fromAssembly(asm) >>> bool(p) True >>> p.assembly.pop() 'SVTCA[0]' >>> bool(p) False """ return (hasattr(self, "assembly") and len(self.assembly) > 0) or ( hasattr(self, "bytecode") and len(self.bytecode) > 0 ) __nonzero__ = __bool__ def __eq__(self, other) -> bool: if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ def __ne__(self, other) -> bool: result = self.__eq__(other) return result if result is NotImplemented else not result def _test(): """ >>> _test() True """ bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-""" p = Program() p.fromBytecode(bc) asm = p.getAssembly(preserve=True) p.fromAssembly(asm) print(bc == p.getBytecode()) if __name__ == "__main__": import sys import doctest sys.exit(doctest.testmod().failed) PKaZZZl��T�m�mfontTools/ufoLib/__init__.pyimport os from copy import deepcopy from os import fsdecode import logging import zipfile import enum from collections import OrderedDict import fs import fs.base import fs.subfs import fs.errors import fs.copy import fs.osfs import fs.zipfs import fs.tempfs import fs.tools from fontTools.misc import plistlib from fontTools.ufoLib.validators import * from fontTools.ufoLib.filenames import userNameToFileName from 
fontTools.ufoLib.converters import convertUFO1OrUFO2KerningToUFO3Kerning from fontTools.ufoLib.errors import UFOLibError from fontTools.ufoLib.utils import numberTypes, _VersionTupleEnumMixin """ A library for importing .ufo files and their descendants. Refer to http://unifiedfontobject.com for the UFO specification. The UFOReader and UFOWriter classes support versions 1, 2 and 3 of the specification. Sets that list the font info attribute names for the fontinfo.plist formats are available for external use. These are: fontInfoAttributesVersion1 fontInfoAttributesVersion2 fontInfoAttributesVersion3 A set listing the fontinfo.plist attributes that were deprecated in version 2 is available for external use: deprecatedFontInfoAttributesVersion2 Functions that do basic validation on values for fontinfo.plist are available for external use. These are validateFontInfoVersion2ValueForAttribute validateFontInfoVersion3ValueForAttribute Value conversion functions are available for converting fontinfo.plist values between the possible format versions. convertFontInfoValueForAttributeFromVersion1ToVersion2 convertFontInfoValueForAttributeFromVersion2ToVersion1 convertFontInfoValueForAttributeFromVersion2ToVersion3 convertFontInfoValueForAttributeFromVersion3ToVersion2 """ __all__ = [ "makeUFOPath", "UFOLibError", "UFOReader", "UFOWriter", "UFOReaderWriter", "UFOFileStructure", "fontInfoAttributesVersion1", "fontInfoAttributesVersion2", "fontInfoAttributesVersion3", "deprecatedFontInfoAttributesVersion2", "validateFontInfoVersion2ValueForAttribute", "validateFontInfoVersion3ValueForAttribute", "convertFontInfoValueForAttributeFromVersion1ToVersion2", "convertFontInfoValueForAttributeFromVersion2ToVersion1", ] __version__ = "3.0.0" logger = logging.getLogger(__name__) # --------- # Constants # --------- DEFAULT_GLYPHS_DIRNAME = "glyphs" DATA_DIRNAME = "data" IMAGES_DIRNAME = "images" METAINFO_FILENAME = "metainfo.plist" FONTINFO_FILENAME = "fontinfo.plist" LIB_FILENAME = "lib.plist" GROUPS_FILENAME = "groups.plist" KERNING_FILENAME = "kerning.plist" FEATURES_FILENAME = "features.fea" LAYERCONTENTS_FILENAME = "layercontents.plist" LAYERINFO_FILENAME = "layerinfo.plist" DEFAULT_LAYER_NAME = "public.default" class UFOFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum): FORMAT_1_0 = (1, 0) FORMAT_2_0 = (2, 0) FORMAT_3_0 = (3, 0) # python 3.11 doesn't like when a mixin overrides a dunder method like __str__ # for some reasons it keep using Enum.__str__, see # https://github.com/fonttools/fonttools/pull/2655 UFOFormatVersion.__str__ = _VersionTupleEnumMixin.__str__ class UFOFileStructure(enum.Enum): ZIP = "zip" PACKAGE = "package" # -------------- # Shared Methods # -------------- class _UFOBaseIO: def getFileModificationTime(self, path): """ Returns the modification time for the file at the given path, as a floating point number giving the number of seconds since the epoch. The path must be relative to the UFO path. Returns None if the file does not exist. """ try: dt = self.fs.getinfo(fsdecode(path), namespaces=["details"]).modified except (fs.errors.MissingInfoNamespace, fs.errors.ResourceNotFound): return None else: return dt.timestamp() def _getPlist(self, fileName, default=None): """ Read a property list relative to the UFO filesystem's root. Raises UFOLibError if the file is missing and default is None, otherwise default is returned. The errors that could be raised during the reading of a plist are unpredictable and/or too large to list, so, a blind try: except: is done. 
If an exception occurs, a UFOLibError will be raised. """ try: with self.fs.open(fileName, "rb") as f: return plistlib.load(f) except fs.errors.ResourceNotFound: if default is None: raise UFOLibError( "'%s' is missing on %s. This file is required" % (fileName, self.fs) ) else: return default except Exception as e: # TODO(anthrotype): try to narrow this down a little raise UFOLibError(f"'{fileName}' could not be read on {self.fs}: {e}") def _writePlist(self, fileName, obj): """ Write a property list to a file relative to the UFO filesystem's root. Do this sort of atomically, making it harder to corrupt existing files, for example when plistlib encounters an error halfway during write. This also checks to see if text matches the text that is already in the file at path. If so, the file is not rewritten so that the modification date is preserved. The errors that could be raised during the writing of a plist are unpredictable and/or too large to list, so, a blind try: except: is done. If an exception occurs, a UFOLibError will be raised. """ if self._havePreviousFile: try: data = plistlib.dumps(obj) except Exception as e: raise UFOLibError( "'%s' could not be written on %s because " "the data is not properly formatted: %s" % (fileName, self.fs, e) ) if self.fs.exists(fileName) and data == self.fs.readbytes(fileName): return self.fs.writebytes(fileName, data) else: with self.fs.openbin(fileName, mode="w") as fp: try: plistlib.dump(obj, fp) except Exception as e: raise UFOLibError( "'%s' could not be written on %s because " "the data is not properly formatted: %s" % (fileName, self.fs, e) ) # ---------- # UFO Reader # ---------- class UFOReader(_UFOBaseIO): """ Read the various components of the .ufo. By default read data is validated. Set ``validate`` to ``False`` to not validate the data. 
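A minimal usage sketch (the path here is hypothetical; every method
shown is defined on this class)::

    with UFOReader("MyFont.ufo") as reader:
        groups = reader.readGroups()
        kerning = reader.readKerning()
        features = reader.readFeatures()
        glyphSet = reader.getGlyphSet()

The context manager closes the underlying filesystem only when the
reader opened it itself from a path string; when an already open
``fs.base.FS`` instance is passed in, closing it remains the caller's
responsibility.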
""" def __init__(self, path, validate=True): if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() if isinstance(path, str): structure = _sniffFileStructure(path) try: if structure is UFOFileStructure.ZIP: parentFS = fs.zipfs.ZipFS(path, write=False, encoding="utf-8") else: parentFS = fs.osfs.OSFS(path) except fs.errors.CreateFailed as e: raise UFOLibError(f"unable to open '{path}': {e}") if structure is UFOFileStructure.ZIP: # .ufoz zip files must contain a single root directory, with arbitrary # name, containing all the UFO files rootDirs = [ p.name for p in parentFS.scandir("/") # exclude macOS metadata contained in zip file if p.is_dir and p.name != "__MACOSX" ] if len(rootDirs) == 1: # 'ClosingSubFS' ensures that the parent zip file is closed when # its root subdirectory is closed self.fs = parentFS.opendir( rootDirs[0], factory=fs.subfs.ClosingSubFS ) else: raise UFOLibError( "Expected exactly 1 root directory, found %d" % len(rootDirs) ) else: # normal UFO 'packages' are just a single folder self.fs = parentFS # when passed a path string, we make sure we close the newly opened fs # upon calling UFOReader.close method or context manager's __exit__ self._shouldClose = True self._fileStructure = structure elif isinstance(path, fs.base.FS): filesystem = path try: filesystem.check() except fs.errors.FilesystemClosed: raise UFOLibError("the filesystem '%s' is closed" % path) else: self.fs = filesystem try: path = filesystem.getsyspath("/") except fs.errors.NoSysPath: # network or in-memory FS may not map to the local one path = str(filesystem) # when user passed an already initialized fs instance, it is her # responsibility to close it, thus UFOReader.close/__exit__ are no-op self._shouldClose = False # default to a 'package' structure self._fileStructure = UFOFileStructure.PACKAGE else: raise TypeError( "Expected a path string or fs.base.FS object, found '%s'" % type(path).__name__ ) self._path = fsdecode(path) self._validate = validate self._upConvertedKerningData = None try: self.readMetaInfo(validate=validate) except UFOLibError: self.close() raise # properties def _get_path(self): import warnings warnings.warn( "The 'path' attribute is deprecated; use the 'fs' attribute instead", DeprecationWarning, stacklevel=2, ) return self._path path = property(_get_path, doc="The path of the UFO (DEPRECATED).") def _get_formatVersion(self): import warnings warnings.warn( "The 'formatVersion' attribute is deprecated; use the 'formatVersionTuple'", DeprecationWarning, stacklevel=2, ) return self._formatVersion.major formatVersion = property( _get_formatVersion, doc="The (major) format version of the UFO. DEPRECATED: Use formatVersionTuple", ) @property def formatVersionTuple(self): """The (major, minor) format version of the UFO. This is determined by reading metainfo.plist during __init__. """ return self._formatVersion def _get_fileStructure(self): return self._fileStructure fileStructure = property( _get_fileStructure, doc=( "The file structure of the UFO: " "either UFOFileStructure.ZIP or UFOFileStructure.PACKAGE" ), ) # up conversion def _upConvertKerning(self, validate): """ Up convert kerning and groups in UFO 1 and 2. The data will be held internally until each bit of data has been retrieved. The conversion of both must be done at once, so the raw data is cached and an error is raised if one bit of data becomes obsolete before it is called. ``validate`` will validate the data. 
""" if self._upConvertedKerningData: testKerning = self._readKerning() if testKerning != self._upConvertedKerningData["originalKerning"]: raise UFOLibError( "The data in kerning.plist has been modified since it was converted to UFO 3 format." ) testGroups = self._readGroups() if testGroups != self._upConvertedKerningData["originalGroups"]: raise UFOLibError( "The data in groups.plist has been modified since it was converted to UFO 3 format." ) else: groups = self._readGroups() if validate: invalidFormatMessage = "groups.plist is not properly formatted." if not isinstance(groups, dict): raise UFOLibError(invalidFormatMessage) for groupName, glyphList in groups.items(): if not isinstance(groupName, str): raise UFOLibError(invalidFormatMessage) elif not isinstance(glyphList, list): raise UFOLibError(invalidFormatMessage) for glyphName in glyphList: if not isinstance(glyphName, str): raise UFOLibError(invalidFormatMessage) self._upConvertedKerningData = dict( kerning={}, originalKerning=self._readKerning(), groups={}, originalGroups=groups, ) # convert kerning and groups kerning, groups, conversionMaps = convertUFO1OrUFO2KerningToUFO3Kerning( self._upConvertedKerningData["originalKerning"], deepcopy(self._upConvertedKerningData["originalGroups"]), self.getGlyphSet(), ) # store self._upConvertedKerningData["kerning"] = kerning self._upConvertedKerningData["groups"] = groups self._upConvertedKerningData["groupRenameMaps"] = conversionMaps # support methods def readBytesFromPath(self, path): """ Returns the bytes in the file at the given path. The path must be relative to the UFO's filesystem root. Returns None if the file does not exist. """ try: return self.fs.readbytes(fsdecode(path)) except fs.errors.ResourceNotFound: return None def getReadFileForPath(self, path, encoding=None): """ Returns a file (or file-like) object for the file at the given path. The path must be relative to the UFO path. Returns None if the file does not exist. By default the file is opened in binary mode (reads bytes). If encoding is passed, the file is opened in text mode (reads str). Note: The caller is responsible for closing the open file. """ path = fsdecode(path) try: if encoding is None: return self.fs.openbin(path) else: return self.fs.open(path, mode="r", encoding=encoding) except fs.errors.ResourceNotFound: return None # metainfo.plist def _readMetaInfo(self, validate=None): """ Read metainfo.plist and return raw data. Only used for internal operations. ``validate`` will validate the read data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate data = self._getPlist(METAINFO_FILENAME) if validate and not isinstance(data, dict): raise UFOLibError("metainfo.plist is not properly formatted.") try: formatVersionMajor = data["formatVersion"] except KeyError: raise UFOLibError( f"Missing required formatVersion in '{METAINFO_FILENAME}' on {self.fs}" ) formatVersionMinor = data.setdefault("formatVersionMinor", 0) try: formatVersion = UFOFormatVersion((formatVersionMajor, formatVersionMinor)) except ValueError as e: unsupportedMsg = ( f"Unsupported UFO format ({formatVersionMajor}.{formatVersionMinor}) " f"in '{METAINFO_FILENAME}' on {self.fs}" ) if validate: from fontTools.ufoLib.errors import UnsupportedUFOFormat raise UnsupportedUFOFormat(unsupportedMsg) from e formatVersion = UFOFormatVersion.default() logger.warning( "%s. Assuming the latest supported version (%s). 
" "Some data may be skipped or parsed incorrectly", unsupportedMsg, formatVersion, ) data["formatVersionTuple"] = formatVersion return data def readMetaInfo(self, validate=None): """ Read metainfo.plist and set formatVersion. Only used for internal operations. ``validate`` will validate the read data, by default it is set to the class's validate value, can be overridden. """ data = self._readMetaInfo(validate=validate) self._formatVersion = data["formatVersionTuple"] # groups.plist def _readGroups(self): groups = self._getPlist(GROUPS_FILENAME, {}) # remove any duplicate glyphs in a kerning group for groupName, glyphList in groups.items(): if groupName.startswith(("public.kern1.", "public.kern2.")): groups[groupName] = list(OrderedDict.fromkeys(glyphList)) return groups def readGroups(self, validate=None): """ Read groups.plist. Returns a dict. ``validate`` will validate the read data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate # handle up conversion if self._formatVersion < UFOFormatVersion.FORMAT_3_0: self._upConvertKerning(validate) groups = self._upConvertedKerningData["groups"] # normal else: groups = self._readGroups() if validate: valid, message = groupsValidator(groups) if not valid: raise UFOLibError(message) return groups def getKerningGroupConversionRenameMaps(self, validate=None): """ Get maps defining the renaming that was done during any needed kerning group conversion. This method returns a dictionary of this form:: { "side1" : {"old group name" : "new group name"}, "side2" : {"old group name" : "new group name"} } When no conversion has been performed, the side1 and side2 dictionaries will be empty. ``validate`` will validate the groups, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate if self._formatVersion >= UFOFormatVersion.FORMAT_3_0: return dict(side1={}, side2={}) # use the public group reader to force the load and # conversion of the data if it hasn't happened yet. self.readGroups(validate=validate) return self._upConvertedKerningData["groupRenameMaps"] # fontinfo.plist def _readInfo(self, validate): data = self._getPlist(FONTINFO_FILENAME, {}) if validate and not isinstance(data, dict): raise UFOLibError("fontinfo.plist is not properly formatted.") return data def readInfo(self, info, validate=None): """ Read fontinfo.plist. It requires an object that allows setting attributes with names that follow the fontinfo.plist version 3 specification. This will write the attributes defined in the file into the object. ``validate`` will validate the read data, by default it is set to the class's validate value, can be overridden. 
""" if validate is None: validate = self._validate infoDict = self._readInfo(validate) infoDataToSet = {} # version 1 if self._formatVersion == UFOFormatVersion.FORMAT_1_0: for attr in fontInfoAttributesVersion1: value = infoDict.get(attr) if value is not None: infoDataToSet[attr] = value infoDataToSet = _convertFontInfoDataVersion1ToVersion2(infoDataToSet) infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet) # version 2 elif self._formatVersion == UFOFormatVersion.FORMAT_2_0: for attr, dataValidationDict in list( fontInfoAttributesVersion2ValueData.items() ): value = infoDict.get(attr) if value is None: continue infoDataToSet[attr] = value infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet) # version 3.x elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major: for attr, dataValidationDict in list( fontInfoAttributesVersion3ValueData.items() ): value = infoDict.get(attr) if value is None: continue infoDataToSet[attr] = value # unsupported version else: raise NotImplementedError(self._formatVersion) # validate data if validate: infoDataToSet = validateInfoVersion3Data(infoDataToSet) # populate the object for attr, value in list(infoDataToSet.items()): try: setattr(info, attr, value) except AttributeError: raise UFOLibError( "The supplied info object does not support setting a necessary attribute (%s)." % attr ) # kerning.plist def _readKerning(self): data = self._getPlist(KERNING_FILENAME, {}) return data def readKerning(self, validate=None): """ Read kerning.plist. Returns a dict. ``validate`` will validate the kerning data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate # handle up conversion if self._formatVersion < UFOFormatVersion.FORMAT_3_0: self._upConvertKerning(validate) kerningNested = self._upConvertedKerningData["kerning"] # normal else: kerningNested = self._readKerning() if validate: valid, message = kerningValidator(kerningNested) if not valid: raise UFOLibError(message) # flatten kerning = {} for left in kerningNested: for right in kerningNested[left]: value = kerningNested[left][right] kerning[left, right] = value return kerning # lib.plist def readLib(self, validate=None): """ Read lib.plist. Returns a dict. ``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate data = self._getPlist(LIB_FILENAME, {}) if validate: valid, message = fontLibValidator(data) if not valid: raise UFOLibError(message) return data # features.fea def readFeatures(self): """ Read features.fea. Return a string. The returned string is empty if the file is missing. """ try: with self.fs.open(FEATURES_FILENAME, "r", encoding="utf-8") as f: return f.read() except fs.errors.ResourceNotFound: return "" # glyph sets & layers def _readLayerContents(self, validate): """ Rebuild the layer contents list by checking what glyphsets are available on disk. ``validate`` will validate the layer contents. """ if self._formatVersion < UFOFormatVersion.FORMAT_3_0: return [(DEFAULT_LAYER_NAME, DEFAULT_GLYPHS_DIRNAME)] contents = self._getPlist(LAYERCONTENTS_FILENAME) if validate: valid, error = layerContentsValidator(contents, self.fs) if not valid: raise UFOLibError(error) return contents def getLayerNames(self, validate=None): """ Get the ordered layer names from layercontents.plist. ``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. 
""" if validate is None: validate = self._validate layerContents = self._readLayerContents(validate) layerNames = [layerName for layerName, directoryName in layerContents] return layerNames def getDefaultLayerName(self, validate=None): """ Get the default layer name from layercontents.plist. ``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate layerContents = self._readLayerContents(validate) for layerName, layerDirectory in layerContents: if layerDirectory == DEFAULT_GLYPHS_DIRNAME: return layerName # this will already have been raised during __init__ raise UFOLibError("The default layer is not defined in layercontents.plist.") def getGlyphSet(self, layerName=None, validateRead=None, validateWrite=None): """ Return the GlyphSet associated with the glyphs directory mapped to layerName in the UFO. If layerName is not provided, the name retrieved with getDefaultLayerName will be used. ``validateRead`` will validate the read data, by default it is set to the class's validate value, can be overridden. ``validateWrite`` will validate the written data, by default it is set to the class's validate value, can be overridden. """ from fontTools.ufoLib.glifLib import GlyphSet if validateRead is None: validateRead = self._validate if validateWrite is None: validateWrite = self._validate if layerName is None: layerName = self.getDefaultLayerName(validate=validateRead) directory = None layerContents = self._readLayerContents(validateRead) for storedLayerName, storedLayerDirectory in layerContents: if layerName == storedLayerName: directory = storedLayerDirectory break if directory is None: raise UFOLibError('No glyphs directory is mapped to "%s".' % layerName) try: glyphSubFS = self.fs.opendir(directory) except fs.errors.ResourceNotFound: raise UFOLibError(f"No '{directory}' directory for layer '{layerName}'") return GlyphSet( glyphSubFS, ufoFormatVersion=self._formatVersion, validateRead=validateRead, validateWrite=validateWrite, expectContentsFile=True, ) def getCharacterMapping(self, layerName=None, validate=None): """ Return a dictionary that maps unicode values (ints) to lists of glyph names. """ if validate is None: validate = self._validate glyphSet = self.getGlyphSet( layerName, validateRead=validate, validateWrite=True ) allUnicodes = glyphSet.getUnicodes() cmap = {} for glyphName, unicodes in allUnicodes.items(): for code in unicodes: if code in cmap: cmap[code].append(glyphName) else: cmap[code] = [glyphName] return cmap # /data def getDataDirectoryListing(self): """ Returns a list of all files in the data directory. The returned paths will be relative to the UFO. This will not list directory names, only file names. Thus, empty directories will be skipped. """ try: self._dataFS = self.fs.opendir(DATA_DIRNAME) except fs.errors.ResourceNotFound: return [] except fs.errors.DirectoryExpected: raise UFOLibError('The UFO contains a "data" file instead of a directory.') try: # fs Walker.files method returns "absolute" paths (in terms of the # root of the 'data' SubFS), so we strip the leading '/' to make # them relative return [p.lstrip("/") for p in self._dataFS.walk.files()] except fs.errors.ResourceError: return [] def getImageDirectoryListing(self, validate=None): """ Returns a list of all image file names in the images directory. Each of the images will have been verified to have the PNG signature. 
``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. """ if self._formatVersion < UFOFormatVersion.FORMAT_3_0: return [] if validate is None: validate = self._validate try: self._imagesFS = imagesFS = self.fs.opendir(IMAGES_DIRNAME) except fs.errors.ResourceNotFound: return [] except fs.errors.DirectoryExpected: raise UFOLibError( 'The UFO contains an "images" file instead of a directory.' ) result = [] for path in imagesFS.scandir("/"): if path.is_dir: # silently skip this as version control # systems often have hidden directories continue if validate: with imagesFS.openbin(path.name) as fp: valid, error = pngValidator(fileObj=fp) if valid: result.append(path.name) else: result.append(path.name) return result def readData(self, fileName): """ Return bytes for the file named 'fileName' inside the 'data/' directory. """ fileName = fsdecode(fileName) try: try: dataFS = self._dataFS except AttributeError: # in case readData is called before getDataDirectoryListing dataFS = self.fs.opendir(DATA_DIRNAME) data = dataFS.readbytes(fileName) except fs.errors.ResourceNotFound: raise UFOLibError(f"No data file named '{fileName}' on {self.fs}") return data def readImage(self, fileName, validate=None): """ Return image data for the file named fileName. ``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate if self._formatVersion < UFOFormatVersion.FORMAT_3_0: raise UFOLibError( f"Reading images is not allowed in UFO {self._formatVersion.major}." ) fileName = fsdecode(fileName) try: try: imagesFS = self._imagesFS except AttributeError: # in case readImage is called before getImageDirectoryListing imagesFS = self.fs.opendir(IMAGES_DIRNAME) data = imagesFS.readbytes(fileName) except fs.errors.ResourceNotFound: raise UFOLibError(f"No image file named '{fileName}' on {self.fs}") if validate: valid, error = pngValidator(data=data) if not valid: raise UFOLibError(error) return data def close(self): if self._shouldClose: self.fs.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() # ---------- # UFO Writer # ---------- class UFOWriter(UFOReader): """ Write the various components of the .ufo. By default, the written data will be validated before writing. Set ``validate`` to ``False`` if you do not want to validate the data. Validation can also be overriden on a per method level if desired. The ``formatVersion`` argument allows to specify the UFO format version as a tuple of integers (major, minor), or as a single integer for the major digit only (minor is implied as 0). By default the latest formatVersion will be used; currently it's 3.0, which is equivalent to formatVersion=(3, 0). An UnsupportedUFOFormat exception is raised if the requested UFO formatVersion is not supported. 
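A minimal usage sketch (the output path is hypothetical)::

    writer = UFOWriter("MyFont.ufo", formatVersion=(3, 0))
    # ... write metadata, kerning, features and glyphs here ...
    writer.close()

Passing ``structure=UFOFileStructure.ZIP`` when creating a new file
writes a zipped UFO containing a single root directory; the default
for new files is the on-disk ``package`` structure.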
""" def __init__( self, path, formatVersion=None, fileCreator="com.github.fonttools.ufoLib", structure=None, validate=True, ): try: formatVersion = UFOFormatVersion(formatVersion) except ValueError as e: from fontTools.ufoLib.errors import UnsupportedUFOFormat raise UnsupportedUFOFormat( f"Unsupported UFO format: {formatVersion!r}" ) from e if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() if isinstance(path, str): # normalize path by removing trailing or double slashes path = os.path.normpath(path) havePreviousFile = os.path.exists(path) if havePreviousFile: # ensure we use the same structure as the destination existingStructure = _sniffFileStructure(path) if structure is not None: try: structure = UFOFileStructure(structure) except ValueError: raise UFOLibError( "Invalid or unsupported structure: '%s'" % structure ) if structure is not existingStructure: raise UFOLibError( "A UFO with a different structure (%s) already exists " "at the given path: '%s'" % (existingStructure, path) ) else: structure = existingStructure else: # if not exists, default to 'package' structure if structure is None: structure = UFOFileStructure.PACKAGE dirName = os.path.dirname(path) if dirName and not os.path.isdir(dirName): raise UFOLibError( "Cannot write to '%s': directory does not exist" % path ) if structure is UFOFileStructure.ZIP: if havePreviousFile: # we can't write a zip in-place, so we have to copy its # contents to a temporary location and work from there, then # upon closing UFOWriter we create the final zip file parentFS = fs.tempfs.TempFS() with fs.zipfs.ZipFS(path, encoding="utf-8") as origFS: fs.copy.copy_fs(origFS, parentFS) # if output path is an existing zip, we require that it contains # one, and only one, root directory (with arbitrary name), in turn # containing all the existing UFO contents rootDirs = [ p.name for p in parentFS.scandir("/") # exclude macOS metadata contained in zip file if p.is_dir and p.name != "__MACOSX" ] if len(rootDirs) != 1: raise UFOLibError( "Expected exactly 1 root directory, found %d" % len(rootDirs) ) else: # 'ClosingSubFS' ensures that the parent filesystem is closed # when its root subdirectory is closed self.fs = parentFS.opendir( rootDirs[0], factory=fs.subfs.ClosingSubFS ) else: # if the output zip file didn't exist, we create the root folder; # we name it the same as input 'path', but with '.ufo' extension rootDir = os.path.splitext(os.path.basename(path))[0] + ".ufo" parentFS = fs.zipfs.ZipFS(path, write=True, encoding="utf-8") parentFS.makedir(rootDir) self.fs = parentFS.opendir(rootDir, factory=fs.subfs.ClosingSubFS) else: self.fs = fs.osfs.OSFS(path, create=True) self._fileStructure = structure self._havePreviousFile = havePreviousFile self._shouldClose = True elif isinstance(path, fs.base.FS): filesystem = path try: filesystem.check() except fs.errors.FilesystemClosed: raise UFOLibError("the filesystem '%s' is closed" % path) else: self.fs = filesystem try: path = filesystem.getsyspath("/") except fs.errors.NoSysPath: # network or in-memory FS may not map to the local one path = str(filesystem) # if passed an FS object, always use 'package' structure if structure and structure is not UFOFileStructure.PACKAGE: import warnings warnings.warn( "The 'structure' argument is not used when input is an FS object", UserWarning, stacklevel=2, ) self._fileStructure = UFOFileStructure.PACKAGE # if FS contains a "metainfo.plist", we consider it non-empty self._havePreviousFile = filesystem.exists(METAINFO_FILENAME) # the user 
is responsible for closing the FS object self._shouldClose = False else: raise TypeError( "Expected a path string or fs object, found %s" % type(path).__name__ ) # establish some basic stuff self._path = fsdecode(path) self._formatVersion = formatVersion self._fileCreator = fileCreator self._downConversionKerningData = None self._validate = validate # if the file already exists, get the format version. # this will be needed for up and down conversion. previousFormatVersion = None if self._havePreviousFile: metaInfo = self._readMetaInfo(validate=validate) previousFormatVersion = metaInfo["formatVersionTuple"] # catch down conversion if previousFormatVersion > formatVersion: from fontTools.ufoLib.errors import UnsupportedUFOFormat raise UnsupportedUFOFormat( "The UFO located at this path is a higher version " f"({previousFormatVersion}) than the version ({formatVersion}) " "that is trying to be written. This is not supported." ) # handle the layer contents self.layerContents = {} if previousFormatVersion is not None and previousFormatVersion.major >= 3: # already exists self.layerContents = OrderedDict(self._readLayerContents(validate)) else: # previous < 3 # imply the layer contents if self.fs.exists(DEFAULT_GLYPHS_DIRNAME): self.layerContents = {DEFAULT_LAYER_NAME: DEFAULT_GLYPHS_DIRNAME} # write the new metainfo self._writeMetaInfo() # properties def _get_fileCreator(self): return self._fileCreator fileCreator = property( _get_fileCreator, doc="The file creator of the UFO. This is set into metainfo.plist during __init__.", ) # support methods for file system interaction def copyFromReader(self, reader, sourcePath, destPath): """ Copy the sourcePath in the provided UFOReader to destPath in this writer. The paths must be relative. This works with both individual files and directories. """ if not isinstance(reader, UFOReader): raise UFOLibError("The reader must be an instance of UFOReader.") sourcePath = fsdecode(sourcePath) destPath = fsdecode(destPath) if not reader.fs.exists(sourcePath): raise UFOLibError( 'The reader does not have data located at "%s".' % sourcePath ) if self.fs.exists(destPath): raise UFOLibError('A file named "%s" already exists.' % destPath) # create the destination directory if it doesn't exist self.fs.makedirs(fs.path.dirname(destPath), recreate=True) if reader.fs.isdir(sourcePath): fs.copy.copy_dir(reader.fs, sourcePath, self.fs, destPath) else: fs.copy.copy_file(reader.fs, sourcePath, self.fs, destPath) def writeBytesToPath(self, path, data): """ Write bytes to a path relative to the UFO filesystem's root. If writing to an existing UFO, check to see if data matches the data that is already in the file at path; if so, the file is not rewritten so that the modification date is preserved. If needed, the directory tree for the given path will be built. """ path = fsdecode(path) if self._havePreviousFile: if self.fs.isfile(path) and data == self.fs.readbytes(path): return try: self.fs.writebytes(path, data) except fs.errors.FileExpected: raise UFOLibError("A directory exists at '%s'" % path) except fs.errors.ResourceNotFound: self.fs.makedirs(fs.path.dirname(path), recreate=True) self.fs.writebytes(path, data) def getFileObjectForPath(self, path, mode="w", encoding=None): """ Returns a file (or file-like) object for the file at the given path. The path must be relative to the UFO path. Returns None if the file does not exist and the mode is "r" or "rb". An encoding may be passed if the file is opened in text mode.
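For example, a sketch given a UFOWriter instance ``writer`` (the ``features.fea`` path and content are illustrative)::

    fp = writer.getFileObjectForPath("features.fea", mode="w", encoding="utf-8")
    try:
        fp.write("# feature code")
    finally:
        fp.close()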
Note: The caller is responsible for closing the open file. """ path = fsdecode(path) try: return self.fs.open(path, mode=mode, encoding=encoding) except fs.errors.ResourceNotFound as e: m = mode[0] if m == "r": # XXX I think we should just let it raise. The docstring, # however, says that this returns None if mode is 'r' return None elif m == "w" or m == "a" or m == "x": self.fs.makedirs(fs.path.dirname(path), recreate=True) return self.fs.open(path, mode=mode, encoding=encoding) except fs.errors.ResourceError as e: raise UFOLibError(f"unable to open '{path}' on {self.fs}: {e}") def removePath(self, path, force=False, removeEmptyParents=True): """ Remove the file (or directory) at path. The path must be relative to the UFO. Raises UFOLibError if the path doesn't exist. If force=True, ignore non-existent paths. If the directory where 'path' is located becomes empty, it will be automatically removed, unless 'removeEmptyParents' is False. """ path = fsdecode(path) try: self.fs.remove(path) except fs.errors.FileExpected: self.fs.removetree(path) except fs.errors.ResourceNotFound: if not force: raise UFOLibError(f"'{path}' does not exist on {self.fs}") if removeEmptyParents: parent = fs.path.dirname(path) if parent: fs.tools.remove_empty(self.fs, parent) # alias kept for backward compatibility with old API removeFileForPath = removePath # UFO mod time def setModificationTime(self): """ Set the UFO modification time to the current time. This is never called automatically. It is up to the caller to call this when finished working on the UFO. """ path = self._path if path is not None and os.path.exists(path): try: # this may fail on some filesystems (e.g. SMB servers) os.utime(path, None) except OSError as e: logger.warning("Failed to set modified time: %s", e) # metainfo.plist def _writeMetaInfo(self): metaInfo = dict( creator=self._fileCreator, formatVersion=self._formatVersion.major, ) if self._formatVersion.minor != 0: metaInfo["formatVersionMinor"] = self._formatVersion.minor self._writePlist(METAINFO_FILENAME, metaInfo) # groups.plist def setKerningGroupConversionRenameMaps(self, maps): """ Set maps defining the renaming that should be done when writing groups and kerning in UFO 1 and UFO 2. This will effectively undo the conversion done when UFOReader reads this data. The dictionary should have this form:: { "side1" : {"group name to use when writing" : "group name in data"}, "side2" : {"group name to use when writing" : "group name in data"} } This is the same form returned by UFOReader's getKerningGroupConversionRenameMaps method. """ if self._formatVersion >= UFOFormatVersion.FORMAT_3_0: return # XXX raise an error here # flip the dictionaries remap = {} for side in ("side1", "side2"): for writeName, dataName in list(maps[side].items()): remap[dataName] = writeName self._downConversionKerningData = dict(groupRenameMap=remap) def writeGroups(self, groups, validate=None): """ Write groups.plist. This method requires a dict of glyph groups as an argument. ``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate # validate the data structure if validate: valid, message = groupsValidator(groups) if not valid: raise UFOLibError(message) # down convert if ( self._formatVersion < UFOFormatVersion.FORMAT_3_0 and self._downConversionKerningData is not None ): remap = self._downConversionKerningData["groupRenameMap"] remappedGroups = {} # there are some edge cases here that are ignored: # 1.
if a group is being renamed to a name that # already exists, the existing group is always # overwritten. (this is why there are two loops # below.) there doesn't seem to be a logical # solution to mismatching groups, and overwriting # with the specified group seems like a better # solution than throwing an error. # 2. if side 1 and side 2 groups are being renamed # to the same group name there is no check to # ensure that the contents are identical. that # is left up to the caller. for name, contents in list(groups.items()): if name in remap: continue remappedGroups[name] = contents for name, contents in list(groups.items()): if name not in remap: continue name = remap[name] remappedGroups[name] = contents groups = remappedGroups # pack and write groupsNew = {} for key, value in groups.items(): groupsNew[key] = list(value) if groupsNew: self._writePlist(GROUPS_FILENAME, groupsNew) elif self._havePreviousFile: self.removePath(GROUPS_FILENAME, force=True, removeEmptyParents=False) # fontinfo.plist def writeInfo(self, info, validate=None): """ Write fontinfo.plist. This method requires an object that supports getting attributes that follow the fontinfo.plist version 2 specification. Attributes will be taken from the given object and written into the file. ``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate # gather version 3 data infoData = {} for attr in list(fontInfoAttributesVersion3ValueData.keys()): if hasattr(info, attr): try: value = getattr(info, attr) except AttributeError: raise UFOLibError( "The supplied info object does not support getting a necessary attribute (%s)." % attr ) if value is None: continue infoData[attr] = value # down convert data if necessary and validate if self._formatVersion == UFOFormatVersion.FORMAT_3_0: if validate: infoData = validateInfoVersion3Data(infoData) elif self._formatVersion == UFOFormatVersion.FORMAT_2_0: infoData = _convertFontInfoDataVersion3ToVersion2(infoData) if validate: infoData = validateInfoVersion2Data(infoData) elif self._formatVersion == UFOFormatVersion.FORMAT_1_0: infoData = _convertFontInfoDataVersion3ToVersion2(infoData) if validate: infoData = validateInfoVersion2Data(infoData) infoData = _convertFontInfoDataVersion2ToVersion1(infoData) # write file if there is anything to write if infoData: self._writePlist(FONTINFO_FILENAME, infoData) # kerning.plist def writeKerning(self, kerning, validate=None): """ Write kerning.plist. This method requires a dict of kerning pairs as an argument. This performs basic structural validation of the kerning, but it does not check for compliance with the spec with regard to conflicting pairs. The assumption is that the kerning data being passed is standards compliant. ``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate # validate the data structure if validate: invalidFormatMessage = "The kerning is not properly formatted."
if not isDictEnough(kerning): raise UFOLibError(invalidFormatMessage) for pair, value in list(kerning.items()): if not isinstance(pair, (list, tuple)): raise UFOLibError(invalidFormatMessage) if not len(pair) == 2: raise UFOLibError(invalidFormatMessage) if not isinstance(pair[0], str): raise UFOLibError(invalidFormatMessage) if not isinstance(pair[1], str): raise UFOLibError(invalidFormatMessage) if not isinstance(value, numberTypes): raise UFOLibError(invalidFormatMessage) # down convert if ( self._formatVersion < UFOFormatVersion.FORMAT_3_0 and self._downConversionKerningData is not None ): remap = self._downConversionKerningData["groupRenameMap"] remappedKerning = {} for (side1, side2), value in list(kerning.items()): side1 = remap.get(side1, side1) side2 = remap.get(side2, side2) remappedKerning[side1, side2] = value kerning = remappedKerning # pack and write kerningDict = {} for left, right in kerning.keys(): value = kerning[left, right] if left not in kerningDict: kerningDict[left] = {} kerningDict[left][right] = value if kerningDict: self._writePlist(KERNING_FILENAME, kerningDict) elif self._havePreviousFile: self.removePath(KERNING_FILENAME, force=True, removeEmptyParents=False) # lib.plist def writeLib(self, libDict, validate=None): """ Write lib.plist. This method requires a lib dict as an argument. ``validate`` will validate the data, by default it is set to the class's validate value, can be overridden. """ if validate is None: validate = self._validate if validate: valid, message = fontLibValidator(libDict) if not valid: raise UFOLibError(message) if libDict: self._writePlist(LIB_FILENAME, libDict) elif self._havePreviousFile: self.removePath(LIB_FILENAME, force=True, removeEmptyParents=False) # features.fea def writeFeatures(self, features, validate=None): """ Write features.fea. This method requires a features string as an argument. """ if validate is None: validate = self._validate if self._formatVersion == UFOFormatVersion.FORMAT_1_0: raise UFOLibError("features.fea is not allowed in UFO Format Version 1.") if validate: if not isinstance(features, str): raise UFOLibError("The features are not text.") if features: self.writeBytesToPath(FEATURES_FILENAME, features.encode("utf8")) elif self._havePreviousFile: self.removePath(FEATURES_FILENAME, force=True, removeEmptyParents=False) # glyph sets & layers def writeLayerContents(self, layerOrder=None, validate=None): """ Write the layercontents.plist file. This method *must* be called after all glyph sets have been written. """ if validate is None: validate = self._validate if self._formatVersion < UFOFormatVersion.FORMAT_3_0: return if layerOrder is not None: newOrder = [] for layerName in layerOrder: if layerName is None: layerName = DEFAULT_LAYER_NAME newOrder.append(layerName) layerOrder = newOrder else: layerOrder = list(self.layerContents.keys()) if validate and set(layerOrder) != set(self.layerContents.keys()): raise UFOLibError( "The layer order content does not match the glyph sets that have been created." 
) layerContents = [ (layerName, self.layerContents[layerName]) for layerName in layerOrder ] self._writePlist(LAYERCONTENTS_FILENAME, layerContents) def _findDirectoryForLayerName(self, layerName): foundDirectory = None for existingLayerName, directoryName in list(self.layerContents.items()): if layerName is None and directoryName == DEFAULT_GLYPHS_DIRNAME: foundDirectory = directoryName break elif existingLayerName == layerName: foundDirectory = directoryName break if not foundDirectory: raise UFOLibError( "Could not locate a glyph set directory for the layer named %s." % layerName ) return foundDirectory def getGlyphSet( self, layerName=None, defaultLayer=True, glyphNameToFileNameFunc=None, validateRead=None, validateWrite=None, expectContentsFile=False, ): """ Return the GlyphSet object associated with the appropriate glyph directory in the .ufo. If layerName is None, the default glyph set will be used. The defaultLayer flag indicates that the layer should be saved into the default glyphs directory. ``validateRead`` will validate the read data, by default it is set to the class's validate value, can be overridden. ``validateWrite`` will validate the written data, by default it is set to the class's validate value, can be overridden. ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is not found on the glyph set file system. This should be set to ``True`` if you are reading an existing UFO and ``False`` if you use ``getGlyphSet`` to create a fresh glyph set. """ if validateRead is None: validateRead = self._validate if validateWrite is None: validateWrite = self._validate # only default can be written in < 3 if self._formatVersion < UFOFormatVersion.FORMAT_3_0 and ( not defaultLayer or layerName is not None ): raise UFOLibError( f"Only the default layer can be written in UFO {self._formatVersion.major}."
) # locate a layer name when None has been given if layerName is None and defaultLayer: for existingLayerName, directory in self.layerContents.items(): if directory == DEFAULT_GLYPHS_DIRNAME: layerName = existingLayerName if layerName is None: layerName = DEFAULT_LAYER_NAME elif layerName is None and not defaultLayer: raise UFOLibError("A layer name must be provided for non-default layers.") # move along to format specific writing if self._formatVersion < UFOFormatVersion.FORMAT_3_0: return self._getDefaultGlyphSet( validateRead, validateWrite, glyphNameToFileNameFunc=glyphNameToFileNameFunc, expectContentsFile=expectContentsFile, ) elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major: return self._getGlyphSetFormatVersion3( validateRead, validateWrite, layerName=layerName, defaultLayer=defaultLayer, glyphNameToFileNameFunc=glyphNameToFileNameFunc, expectContentsFile=expectContentsFile, ) else: raise NotImplementedError(self._formatVersion) def _getDefaultGlyphSet( self, validateRead, validateWrite, glyphNameToFileNameFunc=None, expectContentsFile=False, ): from fontTools.ufoLib.glifLib import GlyphSet glyphSubFS = self.fs.makedir(DEFAULT_GLYPHS_DIRNAME, recreate=True) return GlyphSet( glyphSubFS, glyphNameToFileNameFunc=glyphNameToFileNameFunc, ufoFormatVersion=self._formatVersion, validateRead=validateRead, validateWrite=validateWrite, expectContentsFile=expectContentsFile, ) def _getGlyphSetFormatVersion3( self, validateRead, validateWrite, layerName=None, defaultLayer=True, glyphNameToFileNameFunc=None, expectContentsFile=False, ): from fontTools.ufoLib.glifLib import GlyphSet # if the default flag is on, make sure that the default in the file # matches the default being written. also make sure that this layer # name is not already linked to a non-default layer. if defaultLayer: for existingLayerName, directory in self.layerContents.items(): if directory == DEFAULT_GLYPHS_DIRNAME: if existingLayerName != layerName: raise UFOLibError( "Another layer ('%s') is already mapped to the default directory." % existingLayerName ) elif existingLayerName == layerName: raise UFOLibError( "The layer name is already mapped to a non-default layer." ) # get an existing directory name if layerName in self.layerContents: directory = self.layerContents[layerName] # get a new directory name else: if defaultLayer: directory = DEFAULT_GLYPHS_DIRNAME else: # not caching this could be slightly expensive, # but caching it will be cumbersome existing = {d.lower() for d in self.layerContents.values()} directory = userNameToFileName( layerName, existing=existing, prefix="glyphs." ) # make the directory glyphSubFS = self.fs.makedir(directory, recreate=True) # store the mapping self.layerContents[layerName] = directory # load the glyph set return GlyphSet( glyphSubFS, glyphNameToFileNameFunc=glyphNameToFileNameFunc, ufoFormatVersion=self._formatVersion, validateRead=validateRead, validateWrite=validateWrite, expectContentsFile=expectContentsFile, ) def renameGlyphSet(self, layerName, newLayerName, defaultLayer=False): """ Rename a glyph set. Note: if a GlyphSet object has already been retrieved for layerName, it is up to the caller to inform that object that the directory it represents has changed. 
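A sketch of a rename (assuming a non-default layer named "sketches" already exists; the names are illustrative)::

    writer.renameGlyphSet("sketches", "drafts")
    writer.writeLayerContents()  # persists the updated mapping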
""" if self._formatVersion < UFOFormatVersion.FORMAT_3_0: # ignore renaming glyph sets for UFO1 UFO2 # just write the data from the default layer return # the new and old names can be the same # as long as the default is being switched if layerName == newLayerName: # if the default is off and the layer is already not the default, skip if ( self.layerContents[layerName] != DEFAULT_GLYPHS_DIRNAME and not defaultLayer ): return # if the default is on and the layer is already the default, skip if self.layerContents[layerName] == DEFAULT_GLYPHS_DIRNAME and defaultLayer: return else: # make sure the new layer name doesn't already exist if newLayerName is None: newLayerName = DEFAULT_LAYER_NAME if newLayerName in self.layerContents: raise UFOLibError("A layer named %s already exists." % newLayerName) # make sure the default layer doesn't already exist if defaultLayer and DEFAULT_GLYPHS_DIRNAME in self.layerContents.values(): raise UFOLibError("A default layer already exists.") # get the paths oldDirectory = self._findDirectoryForLayerName(layerName) if defaultLayer: newDirectory = DEFAULT_GLYPHS_DIRNAME else: existing = {name.lower() for name in self.layerContents.values()} newDirectory = userNameToFileName( newLayerName, existing=existing, prefix="glyphs." ) # update the internal mapping del self.layerContents[layerName] self.layerContents[newLayerName] = newDirectory # do the file system copy self.fs.movedir(oldDirectory, newDirectory, create=True) def deleteGlyphSet(self, layerName): """ Remove the glyph set matching layerName. """ if self._formatVersion < UFOFormatVersion.FORMAT_3_0: # ignore deleting glyph sets for UFO1 UFO2 as there are no layers # just write the data from the default layer return foundDirectory = self._findDirectoryForLayerName(layerName) self.removePath(foundDirectory, removeEmptyParents=False) del self.layerContents[layerName] def writeData(self, fileName, data): """ Write data to fileName in the 'data' directory. The data must be a bytes string. """ self.writeBytesToPath(f"{DATA_DIRNAME}/{fsdecode(fileName)}", data) def removeData(self, fileName): """ Remove the file named fileName from the data directory. """ self.removePath(f"{DATA_DIRNAME}/{fsdecode(fileName)}") # /images def writeImage(self, fileName, data, validate=None): """ Write data to fileName in the images directory. The data must be a valid PNG. """ if validate is None: validate = self._validate if self._formatVersion < UFOFormatVersion.FORMAT_3_0: raise UFOLibError( f"Images are not allowed in UFO {self._formatVersion.major}." ) fileName = fsdecode(fileName) if validate: valid, error = pngValidator(data=data) if not valid: raise UFOLibError(error) self.writeBytesToPath(f"{IMAGES_DIRNAME}/{fileName}", data) def removeImage(self, fileName, validate=None): # XXX remove unused 'validate'? """ Remove the file named fileName from the images directory. """ if self._formatVersion < UFOFormatVersion.FORMAT_3_0: raise UFOLibError( f"Images are not allowed in UFO {self._formatVersion.major}." ) self.removePath(f"{IMAGES_DIRNAME}/{fsdecode(fileName)}") def copyImageFromReader(self, reader, sourceFileName, destFileName, validate=None): """ Copy the sourceFileName in the provided UFOReader to destFileName in this writer. This uses the most memory efficient method possible for copying the data possible. """ if validate is None: validate = self._validate if self._formatVersion < UFOFormatVersion.FORMAT_3_0: raise UFOLibError( f"Images are not allowed in UFO {self._formatVersion.major}." 
) sourcePath = f"{IMAGES_DIRNAME}/{fsdecode(sourceFileName)}" destPath = f"{IMAGES_DIRNAME}/{fsdecode(destFileName)}" self.copyFromReader(reader, sourcePath, destPath) def close(self): if self._havePreviousFile and self._fileStructure is UFOFileStructure.ZIP: # if we are updating an existing zip file, we can now compress the # contents of the temporary filesystem in the destination path rootDir = os.path.splitext(os.path.basename(self._path))[0] + ".ufo" with fs.zipfs.ZipFS(self._path, write=True, encoding="utf-8") as destFS: fs.copy.copy_fs(self.fs, destFS.makedir(rootDir)) super().close() # just an alias, makes it more explicit UFOReaderWriter = UFOWriter # ---------------- # Helper Functions # ---------------- def _sniffFileStructure(ufo_path): """Return UFOFileStructure.ZIP if the UFO at path 'ufo_path' (str) is a zip file, else return UFOFileStructure.PACKAGE if 'ufo_path' is a directory. Raise UFOLibError if it is a file with unknown structure, or if the path does not exist. """ if zipfile.is_zipfile(ufo_path): return UFOFileStructure.ZIP elif os.path.isdir(ufo_path): return UFOFileStructure.PACKAGE elif os.path.isfile(ufo_path): raise UFOLibError( "The specified UFO does not have a known structure: '%s'" % ufo_path ) else: raise UFOLibError("No such file or directory: '%s'" % ufo_path) def makeUFOPath(path): """ Return a .ufo pathname. >>> makeUFOPath("directory/something.ext") == ( ... os.path.join('directory', 'something.ufo')) True >>> makeUFOPath("directory/something.another.thing.ext") == ( ... os.path.join('directory', 'something.another.thing.ufo')) True """ dir, name = os.path.split(path) name = ".".join([".".join(name.split(".")[:-1]), "ufo"]) return os.path.join(dir, name) # ---------------------- # fontinfo.plist Support # ---------------------- # Version Validators # There is no version 1 validator and there shouldn't be. # The version 1 spec was very loose and there were numerous # cases of invalid values. def validateFontInfoVersion2ValueForAttribute(attr, value): """ This performs very basic validation of the value for attribute following the UFO 2 fontinfo.plist specification. The results of this should not be interpreted as *correct* for the font that they are part of. This merely indicates that the value is of the proper type and, where the specification defines a set range of possible values for an attribute, that the value is in the accepted range. """ dataValidationDict = fontInfoAttributesVersion2ValueData[attr] valueType = dataValidationDict.get("type") validator = dataValidationDict.get("valueValidator") valueOptions = dataValidationDict.get("valueOptions") # have specific options for the validator if valueOptions is not None: isValidValue = validator(value, valueOptions) # no specific options else: if validator == genericTypeValidator: isValidValue = validator(value, valueType) else: isValidValue = validator(value) return isValidValue def validateInfoVersion2Data(infoData): """ This performs very basic validation of the value for infoData following the UFO 2 fontinfo.plist specification. The results of this should not be interpreted as *correct* for the font that they are part of. This merely indicates that the values are of the proper type and, where the specification defines a set range of possible values for an attribute, that the value is in the accepted range.
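For example, a sketch (the attribute values are illustrative)::

    infoData = {"familyName": "Example", "unitsPerEm": 1000}
    infoData = validateInfoVersion2Data(infoData)  # raises UFOLibError on bad values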
""" validInfoData = {} for attr, value in list(infoData.items()): isValidValue = validateFontInfoVersion2ValueForAttribute(attr, value) if not isValidValue: raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).") else: validInfoData[attr] = value return validInfoData def validateFontInfoVersion3ValueForAttribute(attr, value): """ This performs very basic validation of the value for attribute following the UFO 3 fontinfo.plist specification. The results of this should not be interpretted as *correct* for the font that they are part of. This merely indicates that the value is of the proper type and, where the specification defines a set range of possible values for an attribute, that the value is in the accepted range. """ dataValidationDict = fontInfoAttributesVersion3ValueData[attr] valueType = dataValidationDict.get("type") validator = dataValidationDict.get("valueValidator") valueOptions = dataValidationDict.get("valueOptions") # have specific options for the validator if valueOptions is not None: isValidValue = validator(value, valueOptions) # no specific options else: if validator == genericTypeValidator: isValidValue = validator(value, valueType) else: isValidValue = validator(value) return isValidValue def validateInfoVersion3Data(infoData): """ This performs very basic validation of the value for infoData following the UFO 3 fontinfo.plist specification. The results of this should not be interpretted as *correct* for the font that they are part of. This merely indicates that the values are of the proper type and, where the specification defines a set range of possible values for an attribute, that the value is in the accepted range. """ validInfoData = {} for attr, value in list(infoData.items()): isValidValue = validateFontInfoVersion3ValueForAttribute(attr, value) if not isValidValue: raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).") else: validInfoData[attr] = value return validInfoData # Value Options fontInfoOpenTypeHeadFlagsOptions = list(range(0, 15)) fontInfoOpenTypeOS2SelectionOptions = [1, 2, 3, 4, 7, 8, 9] fontInfoOpenTypeOS2UnicodeRangesOptions = list(range(0, 128)) fontInfoOpenTypeOS2CodePageRangesOptions = list(range(0, 64)) fontInfoOpenTypeOS2TypeOptions = [0, 1, 2, 3, 8, 9] # Version Attribute Definitions # This defines the attributes, types and, in some # cases the possible values, that can exist is # fontinfo.plist. 
fontInfoAttributesVersion1 = { "familyName", "styleName", "fullName", "fontName", "menuName", "fontStyle", "note", "versionMajor", "versionMinor", "year", "copyright", "notice", "trademark", "license", "licenseURL", "createdBy", "designer", "designerURL", "vendorURL", "unitsPerEm", "ascender", "descender", "capHeight", "xHeight", "defaultWidth", "slantAngle", "italicAngle", "widthName", "weightName", "weightValue", "fondName", "otFamilyName", "otStyleName", "otMacName", "msCharSet", "fondID", "uniqueID", "ttVendor", "ttUniqueID", "ttVersion", } fontInfoAttributesVersion2ValueData = { "familyName": dict(type=str), "styleName": dict(type=str), "styleMapFamilyName": dict(type=str), "styleMapStyleName": dict( type=str, valueValidator=fontInfoStyleMapStyleNameValidator ), "versionMajor": dict(type=int), "versionMinor": dict(type=int), "year": dict(type=int), "copyright": dict(type=str), "trademark": dict(type=str), "unitsPerEm": dict(type=(int, float)), "descender": dict(type=(int, float)), "xHeight": dict(type=(int, float)), "capHeight": dict(type=(int, float)), "ascender": dict(type=(int, float)), "italicAngle": dict(type=(float, int)), "note": dict(type=str), "openTypeHeadCreated": dict( type=str, valueValidator=fontInfoOpenTypeHeadCreatedValidator ), "openTypeHeadLowestRecPPEM": dict(type=(int, float)), "openTypeHeadFlags": dict( type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeHeadFlagsOptions, ), "openTypeHheaAscender": dict(type=(int, float)), "openTypeHheaDescender": dict(type=(int, float)), "openTypeHheaLineGap": dict(type=(int, float)), "openTypeHheaCaretSlopeRise": dict(type=int), "openTypeHheaCaretSlopeRun": dict(type=int), "openTypeHheaCaretOffset": dict(type=(int, float)), "openTypeNameDesigner": dict(type=str), "openTypeNameDesignerURL": dict(type=str), "openTypeNameManufacturer": dict(type=str), "openTypeNameManufacturerURL": dict(type=str), "openTypeNameLicense": dict(type=str), "openTypeNameLicenseURL": dict(type=str), "openTypeNameVersion": dict(type=str), "openTypeNameUniqueID": dict(type=str), "openTypeNameDescription": dict(type=str), "openTypeNamePreferredFamilyName": dict(type=str), "openTypeNamePreferredSubfamilyName": dict(type=str), "openTypeNameCompatibleFullName": dict(type=str), "openTypeNameSampleText": dict(type=str), "openTypeNameWWSFamilyName": dict(type=str), "openTypeNameWWSSubfamilyName": dict(type=str), "openTypeOS2WidthClass": dict( type=int, valueValidator=fontInfoOpenTypeOS2WidthClassValidator ), "openTypeOS2WeightClass": dict( type=int, valueValidator=fontInfoOpenTypeOS2WeightClassValidator ), "openTypeOS2Selection": dict( type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeOS2SelectionOptions, ), "openTypeOS2VendorID": dict(type=str), "openTypeOS2Panose": dict( type="integerList", valueValidator=fontInfoVersion2OpenTypeOS2PanoseValidator ), "openTypeOS2FamilyClass": dict( type="integerList", valueValidator=fontInfoOpenTypeOS2FamilyClassValidator ), "openTypeOS2UnicodeRanges": dict( type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeOS2UnicodeRangesOptions, ), "openTypeOS2CodePageRanges": dict( type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeOS2CodePageRangesOptions, ), "openTypeOS2TypoAscender": dict(type=(int, float)), "openTypeOS2TypoDescender": dict(type=(int, float)), "openTypeOS2TypoLineGap": dict(type=(int, float)), "openTypeOS2WinAscent": dict(type=(int, float)), "openTypeOS2WinDescent": 
dict(type=(int, float)), "openTypeOS2Type": dict( type="integerList", valueValidator=genericIntListValidator, valueOptions=fontInfoOpenTypeOS2TypeOptions, ), "openTypeOS2SubscriptXSize": dict(type=(int, float)), "openTypeOS2SubscriptYSize": dict(type=(int, float)), "openTypeOS2SubscriptXOffset": dict(type=(int, float)), "openTypeOS2SubscriptYOffset": dict(type=(int, float)), "openTypeOS2SuperscriptXSize": dict(type=(int, float)), "openTypeOS2SuperscriptYSize": dict(type=(int, float)), "openTypeOS2SuperscriptXOffset": dict(type=(int, float)), "openTypeOS2SuperscriptYOffset": dict(type=(int, float)), "openTypeOS2StrikeoutSize": dict(type=(int, float)), "openTypeOS2StrikeoutPosition": dict(type=(int, float)), "openTypeVheaVertTypoAscender": dict(type=(int, float)), "openTypeVheaVertTypoDescender": dict(type=(int, float)), "openTypeVheaVertTypoLineGap": dict(type=(int, float)), "openTypeVheaCaretSlopeRise": dict(type=int), "openTypeVheaCaretSlopeRun": dict(type=int), "openTypeVheaCaretOffset": dict(type=(int, float)), "postscriptFontName": dict(type=str), "postscriptFullName": dict(type=str), "postscriptSlantAngle": dict(type=(float, int)), "postscriptUniqueID": dict(type=int), "postscriptUnderlineThickness": dict(type=(int, float)), "postscriptUnderlinePosition": dict(type=(int, float)), "postscriptIsFixedPitch": dict(type=bool), "postscriptBlueValues": dict( type="integerList", valueValidator=fontInfoPostscriptBluesValidator ), "postscriptOtherBlues": dict( type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator ), "postscriptFamilyBlues": dict( type="integerList", valueValidator=fontInfoPostscriptBluesValidator ), "postscriptFamilyOtherBlues": dict( type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator ), "postscriptStemSnapH": dict( type="integerList", valueValidator=fontInfoPostscriptStemsValidator ), "postscriptStemSnapV": dict( type="integerList", valueValidator=fontInfoPostscriptStemsValidator ), "postscriptBlueFuzz": dict(type=(int, float)), "postscriptBlueShift": dict(type=(int, float)), "postscriptBlueScale": dict(type=(float, int)), "postscriptForceBold": dict(type=bool), "postscriptDefaultWidthX": dict(type=(int, float)), "postscriptNominalWidthX": dict(type=(int, float)), "postscriptWeightName": dict(type=str), "postscriptDefaultCharacter": dict(type=str), "postscriptWindowsCharacterSet": dict( type=int, valueValidator=fontInfoPostscriptWindowsCharacterSetValidator ), "macintoshFONDFamilyID": dict(type=int), "macintoshFONDName": dict(type=str), } fontInfoAttributesVersion2 = set(fontInfoAttributesVersion2ValueData.keys()) fontInfoAttributesVersion3ValueData = deepcopy(fontInfoAttributesVersion2ValueData) fontInfoAttributesVersion3ValueData.update( { "versionMinor": dict(type=int, valueValidator=genericNonNegativeIntValidator), "unitsPerEm": dict( type=(int, float), valueValidator=genericNonNegativeNumberValidator ), "openTypeHeadLowestRecPPEM": dict( type=int, valueValidator=genericNonNegativeNumberValidator ), "openTypeHheaAscender": dict(type=int), "openTypeHheaDescender": dict(type=int), "openTypeHheaLineGap": dict(type=int), "openTypeHheaCaretOffset": dict(type=int), "openTypeOS2Panose": dict( type="integerList", valueValidator=fontInfoVersion3OpenTypeOS2PanoseValidator, ), "openTypeOS2TypoAscender": dict(type=int), "openTypeOS2TypoDescender": dict(type=int), "openTypeOS2TypoLineGap": dict(type=int), "openTypeOS2WinAscent": dict( type=int, valueValidator=genericNonNegativeNumberValidator ), "openTypeOS2WinDescent": dict( type=int, 
valueValidator=genericNonNegativeNumberValidator ), "openTypeOS2SubscriptXSize": dict(type=int), "openTypeOS2SubscriptYSize": dict(type=int), "openTypeOS2SubscriptXOffset": dict(type=int), "openTypeOS2SubscriptYOffset": dict(type=int), "openTypeOS2SuperscriptXSize": dict(type=int), "openTypeOS2SuperscriptYSize": dict(type=int), "openTypeOS2SuperscriptXOffset": dict(type=int), "openTypeOS2SuperscriptYOffset": dict(type=int), "openTypeOS2StrikeoutSize": dict(type=int), "openTypeOS2StrikeoutPosition": dict(type=int), "openTypeGaspRangeRecords": dict( type="dictList", valueValidator=fontInfoOpenTypeGaspRangeRecordsValidator ), "openTypeNameRecords": dict( type="dictList", valueValidator=fontInfoOpenTypeNameRecordsValidator ), "openTypeVheaVertTypoAscender": dict(type=int), "openTypeVheaVertTypoDescender": dict(type=int), "openTypeVheaVertTypoLineGap": dict(type=int), "openTypeVheaCaretOffset": dict(type=int), "woffMajorVersion": dict( type=int, valueValidator=genericNonNegativeIntValidator ), "woffMinorVersion": dict( type=int, valueValidator=genericNonNegativeIntValidator ), "woffMetadataUniqueID": dict( type=dict, valueValidator=fontInfoWOFFMetadataUniqueIDValidator ), "woffMetadataVendor": dict( type=dict, valueValidator=fontInfoWOFFMetadataVendorValidator ), "woffMetadataCredits": dict( type=dict, valueValidator=fontInfoWOFFMetadataCreditsValidator ), "woffMetadataDescription": dict( type=dict, valueValidator=fontInfoWOFFMetadataDescriptionValidator ), "woffMetadataLicense": dict( type=dict, valueValidator=fontInfoWOFFMetadataLicenseValidator ), "woffMetadataCopyright": dict( type=dict, valueValidator=fontInfoWOFFMetadataCopyrightValidator ), "woffMetadataTrademark": dict( type=dict, valueValidator=fontInfoWOFFMetadataTrademarkValidator ), "woffMetadataLicensee": dict( type=dict, valueValidator=fontInfoWOFFMetadataLicenseeValidator ), "woffMetadataExtensions": dict( type=list, valueValidator=fontInfoWOFFMetadataExtensionsValidator ), "guidelines": dict(type=list, valueValidator=guidelinesValidator), } ) fontInfoAttributesVersion3 = set(fontInfoAttributesVersion3ValueData.keys()) # insert the type validator for all attrs that # have no defined validator. for attr, dataDict in list(fontInfoAttributesVersion2ValueData.items()): if "valueValidator" not in dataDict: dataDict["valueValidator"] = genericTypeValidator for attr, dataDict in list(fontInfoAttributesVersion3ValueData.items()): if "valueValidator" not in dataDict: dataDict["valueValidator"] = genericTypeValidator # Version Conversion Support # These are used for converting from version 1 # to version 2 or vice-versa.
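# A sketch of the intended round trip (illustrative only):
#
#     fontInfoAttributesVersion1To2["menuName"]             # -> "styleMapFamilyName"
#     fontInfoAttributesVersion2To1["styleMapFamilyName"]   # -> "menuName"
#
# where fontInfoAttributesVersion2To1 is derived via _flipDict below.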
def _flipDict(d): flipped = {} for key, value in list(d.items()): flipped[value] = key return flipped fontInfoAttributesVersion1To2 = { "menuName": "styleMapFamilyName", "designer": "openTypeNameDesigner", "designerURL": "openTypeNameDesignerURL", "createdBy": "openTypeNameManufacturer", "vendorURL": "openTypeNameManufacturerURL", "license": "openTypeNameLicense", "licenseURL": "openTypeNameLicenseURL", "ttVersion": "openTypeNameVersion", "ttUniqueID": "openTypeNameUniqueID", "notice": "openTypeNameDescription", "otFamilyName": "openTypeNamePreferredFamilyName", "otStyleName": "openTypeNamePreferredSubfamilyName", "otMacName": "openTypeNameCompatibleFullName", "weightName": "postscriptWeightName", "weightValue": "openTypeOS2WeightClass", "ttVendor": "openTypeOS2VendorID", "uniqueID": "postscriptUniqueID", "fontName": "postscriptFontName", "fondID": "macintoshFONDFamilyID", "fondName": "macintoshFONDName", "defaultWidth": "postscriptDefaultWidthX", "slantAngle": "postscriptSlantAngle", "fullName": "postscriptFullName", # require special value conversion "fontStyle": "styleMapStyleName", "widthName": "openTypeOS2WidthClass", "msCharSet": "postscriptWindowsCharacterSet", } fontInfoAttributesVersion2To1 = _flipDict(fontInfoAttributesVersion1To2) deprecatedFontInfoAttributesVersion2 = set(fontInfoAttributesVersion1To2.keys()) _fontStyle1To2 = {64: "regular", 1: "italic", 32: "bold", 33: "bold italic"} _fontStyle2To1 = _flipDict(_fontStyle1To2) # Some UFO 1 files have 0 _fontStyle1To2[0] = "regular" _widthName1To2 = { "Ultra-condensed": 1, "Extra-condensed": 2, "Condensed": 3, "Semi-condensed": 4, "Medium (normal)": 5, "Semi-expanded": 6, "Expanded": 7, "Extra-expanded": 8, "Ultra-expanded": 9, } _widthName2To1 = _flipDict(_widthName1To2) # FontLab's default width value is "Normal". # Many format version 1 UFOs will have this. _widthName1To2["Normal"] = 5 # FontLab has an "All" width value. In UFO 1 # move this up to "Normal". _widthName1To2["All"] = 5 # "medium" appears in a lot of UFO 1 files. _widthName1To2["medium"] = 5 # "Medium" appears in a lot of UFO 1 files. _widthName1To2["Medium"] = 5 _msCharSet1To2 = { 0: 1, 1: 2, 2: 3, 77: 4, 128: 5, 129: 6, 130: 7, 134: 8, 136: 9, 161: 10, 162: 11, 163: 12, 177: 13, 178: 14, 186: 15, 200: 16, 204: 17, 222: 18, 238: 19, 255: 20, } _msCharSet2To1 = _flipDict(_msCharSet1To2) # 1 <-> 2 def convertFontInfoValueForAttributeFromVersion1ToVersion2(attr, value): """ Convert value from version 1 to version 2 format. Returns the new attribute name and the converted value. If the value is None, None will be returned for the new value. """ # convert floats to ints if possible if isinstance(value, float): if int(value) == value: value = int(value) if value is not None: if attr == "fontStyle": v = _fontStyle1To2.get(value) if v is None: raise UFOLibError( f"Cannot convert value ({value!r}) for attribute {attr}." ) value = v elif attr == "widthName": v = _widthName1To2.get(value) if v is None: raise UFOLibError( f"Cannot convert value ({value!r}) for attribute {attr}." ) value = v elif attr == "msCharSet": v = _msCharSet1To2.get(value) if v is None: raise UFOLibError( f"Cannot convert value ({value!r}) for attribute {attr}." ) value = v attr = fontInfoAttributesVersion1To2.get(attr, attr) return attr, value def convertFontInfoValueForAttributeFromVersion2ToVersion1(attr, value): """ Convert value from version 2 to version 1 format. Returns the new attribute name and the converted value. If the value is None, None will be returned for the new value. 
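For example, a sketch::

    convertFontInfoValueForAttributeFromVersion2ToVersion1(
        "styleMapStyleName", "regular"
    )  # -> ("fontStyle", 64)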
""" if value is not None: if attr == "styleMapStyleName": value = _fontStyle2To1.get(value) elif attr == "openTypeOS2WidthClass": value = _widthName2To1.get(value) elif attr == "postscriptWindowsCharacterSet": value = _msCharSet2To1.get(value) attr = fontInfoAttributesVersion2To1.get(attr, attr) return attr, value def _convertFontInfoDataVersion1ToVersion2(data): converted = {} for attr, value in list(data.items()): # FontLab gives -1 for the weightValue # for fonts wil no defined value. Many # format version 1 UFOs will have this. if attr == "weightValue" and value == -1: continue newAttr, newValue = convertFontInfoValueForAttributeFromVersion1ToVersion2( attr, value ) # skip if the attribute is not part of version 2 if newAttr not in fontInfoAttributesVersion2: continue # catch values that can't be converted if value is None: raise UFOLibError( f"Cannot convert value ({value!r}) for attribute {newAttr}." ) # store converted[newAttr] = newValue return converted def _convertFontInfoDataVersion2ToVersion1(data): converted = {} for attr, value in list(data.items()): newAttr, newValue = convertFontInfoValueForAttributeFromVersion2ToVersion1( attr, value ) # only take attributes that are registered for version 1 if newAttr not in fontInfoAttributesVersion1: continue # catch values that can't be converted if value is None: raise UFOLibError( f"Cannot convert value ({value!r}) for attribute {newAttr}." ) # store converted[newAttr] = newValue return converted # 2 <-> 3 _ufo2To3NonNegativeInt = { "versionMinor", "openTypeHeadLowestRecPPEM", "openTypeOS2WinAscent", "openTypeOS2WinDescent", } _ufo2To3NonNegativeIntOrFloat = { "unitsPerEm", } _ufo2To3FloatToInt = { "openTypeHeadLowestRecPPEM", "openTypeHheaAscender", "openTypeHheaDescender", "openTypeHheaLineGap", "openTypeHheaCaretOffset", "openTypeOS2TypoAscender", "openTypeOS2TypoDescender", "openTypeOS2TypoLineGap", "openTypeOS2WinAscent", "openTypeOS2WinDescent", "openTypeOS2SubscriptXSize", "openTypeOS2SubscriptYSize", "openTypeOS2SubscriptXOffset", "openTypeOS2SubscriptYOffset", "openTypeOS2SuperscriptXSize", "openTypeOS2SuperscriptYSize", "openTypeOS2SuperscriptXOffset", "openTypeOS2SuperscriptYOffset", "openTypeOS2StrikeoutSize", "openTypeOS2StrikeoutPosition", "openTypeVheaVertTypoAscender", "openTypeVheaVertTypoDescender", "openTypeVheaVertTypoLineGap", "openTypeVheaCaretOffset", } def convertFontInfoValueForAttributeFromVersion2ToVersion3(attr, value): """ Convert value from version 2 to version 3 format. Returns the new attribute name and the converted value. If the value is None, None will be returned for the new value. """ if attr in _ufo2To3FloatToInt: try: value = round(value) except (ValueError, TypeError): raise UFOLibError("Could not convert value for %s." % attr) if attr in _ufo2To3NonNegativeInt: try: value = int(abs(value)) except (ValueError, TypeError): raise UFOLibError("Could not convert value for %s." % attr) elif attr in _ufo2To3NonNegativeIntOrFloat: try: v = float(abs(value)) except (ValueError, TypeError): raise UFOLibError("Could not convert value for %s." % attr) if v == int(v): v = int(v) if v != value: value = v return attr, value def convertFontInfoValueForAttributeFromVersion3ToVersion2(attr, value): """ Convert value from version 3 to version 2 format. Returns the new attribute name and the converted value. If the value is None, None will be returned for the new value. 
""" return attr, value def _convertFontInfoDataVersion3ToVersion2(data): converted = {} for attr, value in list(data.items()): newAttr, newValue = convertFontInfoValueForAttributeFromVersion3ToVersion2( attr, value ) if newAttr not in fontInfoAttributesVersion2: continue converted[newAttr] = newValue return converted def _convertFontInfoDataVersion2ToVersion3(data): converted = {} for attr, value in list(data.items()): attr, value = convertFontInfoValueForAttributeFromVersion2ToVersion3( attr, value ) converted[attr] = value return converted if __name__ == "__main__": import doctest doctest.testmod() PKaZZZ��h>)>)fontTools/ufoLib/converters.py""" Conversion functions. """ # adapted from the UFO spec def convertUFO1OrUFO2KerningToUFO3Kerning(kerning, groups, glyphSet=()): # gather known kerning groups based on the prefixes firstReferencedGroups, secondReferencedGroups = findKnownKerningGroups(groups) # Make lists of groups referenced in kerning pairs. for first, seconds in list(kerning.items()): if first in groups and first not in glyphSet: if not first.startswith("public.kern1."): firstReferencedGroups.add(first) for second in list(seconds.keys()): if second in groups and second not in glyphSet: if not second.startswith("public.kern2."): secondReferencedGroups.add(second) # Create new names for these groups. firstRenamedGroups = {} for first in firstReferencedGroups: # Make a list of existing group names. existingGroupNames = list(groups.keys()) + list(firstRenamedGroups.keys()) # Remove the old prefix from the name newName = first.replace("@MMK_L_", "") # Add the new prefix to the name. newName = "public.kern1." + newName # Make a unique group name. newName = makeUniqueGroupName(newName, existingGroupNames) # Store for use later. firstRenamedGroups[first] = newName secondRenamedGroups = {} for second in secondReferencedGroups: # Make a list of existing group names. existingGroupNames = list(groups.keys()) + list(secondRenamedGroups.keys()) # Remove the old prefix from the name newName = second.replace("@MMK_R_", "") # Add the new prefix to the name. newName = "public.kern2." + newName # Make a unique group name. newName = makeUniqueGroupName(newName, existingGroupNames) # Store for use later. secondRenamedGroups[second] = newName # Populate the new group names into the kerning dictionary as needed. newKerning = {} for first, seconds in list(kerning.items()): first = firstRenamedGroups.get(first, first) newSeconds = {} for second, value in list(seconds.items()): second = secondRenamedGroups.get(second, second) newSeconds[second] = value newKerning[first] = newSeconds # Make copies of the referenced groups and store them # under the new names in the overall groups dictionary. allRenamedGroups = list(firstRenamedGroups.items()) allRenamedGroups += list(secondRenamedGroups.items()) for oldName, newName in allRenamedGroups: group = list(groups[oldName]) groups[newName] = group # Return the kerning and the groups. return newKerning, groups, dict(side1=firstRenamedGroups, side2=secondRenamedGroups) def findKnownKerningGroups(groups): """ This will find kerning groups with known prefixes. In some cases not all kerning groups will be referenced by the kerning pairs. The algorithm for locating groups in convertUFO1OrUFO2KerningToUFO3Kerning will miss these unreferenced groups. By scanning for known prefixes this function will catch all of the prefixed groups. These are the prefixes and sides that are handled: @MMK_L_ - side 1 @MMK_R_ - side 2 >>> testGroups = { ... "@MMK_L_1" : None, ... 
"@MMK_L_2" : None, ... "@MMK_L_3" : None, ... "@MMK_R_1" : None, ... "@MMK_R_2" : None, ... "@MMK_R_3" : None, ... "@MMK_l_1" : None, ... "@MMK_r_1" : None, ... "@MMK_X_1" : None, ... "foo" : None, ... } >>> first, second = findKnownKerningGroups(testGroups) >>> sorted(first) == ['@MMK_L_1', '@MMK_L_2', '@MMK_L_3'] True >>> sorted(second) == ['@MMK_R_1', '@MMK_R_2', '@MMK_R_3'] True """ knownFirstGroupPrefixes = ["@MMK_L_"] knownSecondGroupPrefixes = ["@MMK_R_"] firstGroups = set() secondGroups = set() for groupName in list(groups.keys()): for firstPrefix in knownFirstGroupPrefixes: if groupName.startswith(firstPrefix): firstGroups.add(groupName) break for secondPrefix in knownSecondGroupPrefixes: if groupName.startswith(secondPrefix): secondGroups.add(groupName) break return firstGroups, secondGroups def makeUniqueGroupName(name, groupNames, counter=0): # Add a number to the name if the counter is higher than zero. newName = name if counter > 0: newName = "%s%d" % (newName, counter) # If the new name is in the existing group names, recurse. if newName in groupNames: return makeUniqueGroupName(name, groupNames, counter + 1) # Otherwise send back the new name. return newName def test(): """ No known prefixes. >>> testKerning = { ... "A" : { ... "A" : 1, ... "B" : 2, ... "CGroup" : 3, ... "DGroup" : 4 ... }, ... "BGroup" : { ... "A" : 5, ... "B" : 6, ... "CGroup" : 7, ... "DGroup" : 8 ... }, ... "CGroup" : { ... "A" : 9, ... "B" : 10, ... "CGroup" : 11, ... "DGroup" : 12 ... }, ... } >>> testGroups = { ... "BGroup" : ["B"], ... "CGroup" : ["C"], ... "DGroup" : ["D"], ... } >>> kerning, groups, maps = convertUFO1OrUFO2KerningToUFO3Kerning( ... testKerning, testGroups, []) >>> expected = { ... "A" : { ... "A": 1, ... "B": 2, ... "public.kern2.CGroup": 3, ... "public.kern2.DGroup": 4 ... }, ... "public.kern1.BGroup": { ... "A": 5, ... "B": 6, ... "public.kern2.CGroup": 7, ... "public.kern2.DGroup": 8 ... }, ... "public.kern1.CGroup": { ... "A": 9, ... "B": 10, ... "public.kern2.CGroup": 11, ... "public.kern2.DGroup": 12 ... } ... } >>> kerning == expected True >>> expected = { ... "BGroup": ["B"], ... "CGroup": ["C"], ... "DGroup": ["D"], ... "public.kern1.BGroup": ["B"], ... "public.kern1.CGroup": ["C"], ... "public.kern2.CGroup": ["C"], ... "public.kern2.DGroup": ["D"], ... } >>> groups == expected True Known prefixes. >>> testKerning = { ... "A" : { ... "A" : 1, ... "B" : 2, ... "@MMK_R_CGroup" : 3, ... "@MMK_R_DGroup" : 4 ... }, ... "@MMK_L_BGroup" : { ... "A" : 5, ... "B" : 6, ... "@MMK_R_CGroup" : 7, ... "@MMK_R_DGroup" : 8 ... }, ... "@MMK_L_CGroup" : { ... "A" : 9, ... "B" : 10, ... "@MMK_R_CGroup" : 11, ... "@MMK_R_DGroup" : 12 ... }, ... } >>> testGroups = { ... "@MMK_L_BGroup" : ["B"], ... "@MMK_L_CGroup" : ["C"], ... "@MMK_L_XGroup" : ["X"], ... "@MMK_R_CGroup" : ["C"], ... "@MMK_R_DGroup" : ["D"], ... "@MMK_R_XGroup" : ["X"], ... } >>> kerning, groups, maps = convertUFO1OrUFO2KerningToUFO3Kerning( ... testKerning, testGroups, []) >>> expected = { ... "A" : { ... "A": 1, ... "B": 2, ... "public.kern2.CGroup": 3, ... "public.kern2.DGroup": 4 ... }, ... "public.kern1.BGroup": { ... "A": 5, ... "B": 6, ... "public.kern2.CGroup": 7, ... "public.kern2.DGroup": 8 ... }, ... "public.kern1.CGroup": { ... "A": 9, ... "B": 10, ... "public.kern2.CGroup": 11, ... "public.kern2.DGroup": 12 ... } ... } >>> kerning == expected True >>> expected = { ... "@MMK_L_BGroup": ["B"], ... "@MMK_L_CGroup": ["C"], ... "@MMK_L_XGroup": ["X"], ... "@MMK_R_CGroup": ["C"], ... "@MMK_R_DGroup": ["D"], ... 
"@MMK_R_XGroup": ["X"], ... "public.kern1.BGroup": ["B"], ... "public.kern1.CGroup": ["C"], ... "public.kern1.XGroup": ["X"], ... "public.kern2.CGroup": ["C"], ... "public.kern2.DGroup": ["D"], ... "public.kern2.XGroup": ["X"], ... } >>> groups == expected True >>> from .validators import kerningValidator >>> kerningValidator(kerning) (True, None) Mixture of known prefixes and groups without prefixes. >>> testKerning = { ... "A" : { ... "A" : 1, ... "B" : 2, ... "@MMK_R_CGroup" : 3, ... "DGroup" : 4 ... }, ... "BGroup" : { ... "A" : 5, ... "B" : 6, ... "@MMK_R_CGroup" : 7, ... "DGroup" : 8 ... }, ... "@MMK_L_CGroup" : { ... "A" : 9, ... "B" : 10, ... "@MMK_R_CGroup" : 11, ... "DGroup" : 12 ... }, ... } >>> testGroups = { ... "BGroup" : ["B"], ... "@MMK_L_CGroup" : ["C"], ... "@MMK_R_CGroup" : ["C"], ... "DGroup" : ["D"], ... } >>> kerning, groups, maps = convertUFO1OrUFO2KerningToUFO3Kerning( ... testKerning, testGroups, []) >>> expected = { ... "A" : { ... "A": 1, ... "B": 2, ... "public.kern2.CGroup": 3, ... "public.kern2.DGroup": 4 ... }, ... "public.kern1.BGroup": { ... "A": 5, ... "B": 6, ... "public.kern2.CGroup": 7, ... "public.kern2.DGroup": 8 ... }, ... "public.kern1.CGroup": { ... "A": 9, ... "B": 10, ... "public.kern2.CGroup": 11, ... "public.kern2.DGroup": 12 ... } ... } >>> kerning == expected True >>> expected = { ... "BGroup": ["B"], ... "@MMK_L_CGroup": ["C"], ... "@MMK_R_CGroup": ["C"], ... "DGroup": ["D"], ... "public.kern1.BGroup": ["B"], ... "public.kern1.CGroup": ["C"], ... "public.kern2.CGroup": ["C"], ... "public.kern2.DGroup": ["D"], ... } >>> groups == expected True """ if __name__ == "__main__": import doctest doctest.testmod() PKaZZZ����HHfontTools/ufoLib/errors.pyfrom __future__ import annotations class UFOLibError(Exception): pass class UnsupportedUFOFormat(UFOLibError): pass class GlifLibError(UFOLibError): def _add_note(self, note: str) -> None: # Loose backport of PEP 678 until we only support Python 3.11+, used for # adding additional context to errors. # TODO: Replace with https://docs.python.org/3.11/library/exceptions.html#BaseException.add_note (message, *rest) = self.args self.args = ((message + "\n" + note), *rest) class UnsupportedGLIFFormat(GlifLibError): pass PKaZZZ�ܤ���fontTools/ufoLib/etree.py"""DEPRECATED - This module is kept here only as a backward compatibility shim for the old ufoLib.etree module, which was moved to fontTools.misc.etree. Please use the latter instead. """ from fontTools.misc.etree import * PKaZZZ���A��fontTools/ufoLib/filenames.py""" User name to file name conversion. This was taken from the UFO 3 spec. """ # Restrictions are taken mostly from # https://docs.microsoft.com/en-gb/windows/win32/fileio/naming-a-file#naming-conventions. # # 1. Integer value zero, sometimes referred to as the ASCII NUL character. # 2. Characters whose integer representations are in the range 1 to 31, # inclusive. # 3. Various characters that (mostly) Windows and POSIX-y filesystems don't # allow, plus "(" and ")", as per the specification. 
illegalCharacters = { "\x00", "\x01", "\x02", "\x03", "\x04", "\x05", "\x06", "\x07", "\x08", "\t", "\n", "\x0b", "\x0c", "\r", "\x0e", "\x0f", "\x10", "\x11", "\x12", "\x13", "\x14", "\x15", "\x16", "\x17", "\x18", "\x19", "\x1a", "\x1b", "\x1c", "\x1d", "\x1e", "\x1f", '"', "*", "+", "/", ":", "<", ">", "?", "[", "\\", "]", "(", ")", "|", "\x7f", } reservedFileNames = { "aux", "clock$", "com1", "com2", "com3", "com4", "com5", "com6", "com7", "com8", "com9", "con", "lpt1", "lpt2", "lpt3", "lpt4", "lpt5", "lpt6", "lpt7", "lpt8", "lpt9", "nul", "prn", } maxFileNameLength = 255 class NameTranslationError(Exception): pass def userNameToFileName(userName: str, existing=(), prefix="", suffix=""): """ `existing` should be a set-like object. >>> userNameToFileName("a") == "a" True >>> userNameToFileName("A") == "A_" True >>> userNameToFileName("AE") == "A_E_" True >>> userNameToFileName("Ae") == "A_e" True >>> userNameToFileName("ae") == "ae" True >>> userNameToFileName("aE") == "aE_" True >>> userNameToFileName("a.alt") == "a.alt" True >>> userNameToFileName("A.alt") == "A_.alt" True >>> userNameToFileName("A.Alt") == "A_.A_lt" True >>> userNameToFileName("A.aLt") == "A_.aL_t" True >>> userNameToFileName(u"A.alT") == "A_.alT_" True >>> userNameToFileName("T_H") == "T__H_" True >>> userNameToFileName("T_h") == "T__h" True >>> userNameToFileName("t_h") == "t_h" True >>> userNameToFileName("F_F_I") == "F__F__I_" True >>> userNameToFileName("f_f_i") == "f_f_i" True >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash" True >>> userNameToFileName(".notdef") == "_notdef" True >>> userNameToFileName("con") == "_con" True >>> userNameToFileName("CON") == "C_O_N_" True >>> userNameToFileName("con.alt") == "_con.alt" True >>> userNameToFileName("alt.con") == "alt._con" True """ # the incoming name must be a string if not isinstance(userName, str): raise ValueError("The value for userName must be a string.") # establish the prefix and suffix lengths prefixLength = len(prefix) suffixLength = len(suffix) # replace an initial period with an _ # if no prefix is to be added if not prefix and userName[0] == ".": userName = "_" + userName[1:] # filter the user name filteredUserName = [] for character in userName: # replace illegal characters with _ if character in illegalCharacters: character = "_" # add _ to all non-lower characters elif character != character.lower(): character += "_" filteredUserName.append(character) userName = "".join(filteredUserName) # clip to 255 sliceLength = maxFileNameLength - prefixLength - suffixLength userName = userName[:sliceLength] # test for illegal files names parts = [] for part in userName.split("."): if part.lower() in reservedFileNames: part = "_" + part parts.append(part) userName = ".".join(parts) # test for clash fullName = prefix + userName + suffix if fullName.lower() in existing: fullName = handleClash1(userName, existing, prefix, suffix) # finished return fullName def handleClash1(userName, existing=[], prefix="", suffix=""): """ existing should be a case-insensitive list of all existing file names. >>> prefix = ("0" * 5) + "." >>> suffix = "." + ("0" * 10) >>> existing = ["a" * 5] >>> e = list(existing) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... '00000.AAAAA000000000000001.0000000000') True >>> e = list(existing) >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... 
'00000.AAAAA000000000000002.0000000000') True >>> e = list(existing) >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... '00000.AAAAA000000000000001.0000000000') True """ # if the prefix length + user name length + suffix length + 15 is at # or past the maximum length, silce 15 characters off of the user name prefixLength = len(prefix) suffixLength = len(suffix) if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength: l = prefixLength + len(userName) + suffixLength + 15 sliceLength = maxFileNameLength - l userName = userName[:sliceLength] finalName = None # try to add numbers to create a unique name counter = 1 while finalName is None: name = userName + str(counter).zfill(15) fullName = prefix + name + suffix if fullName.lower() not in existing: finalName = fullName break else: counter += 1 if counter >= 999999999999999: break # if there is a clash, go to the next fallback if finalName is None: finalName = handleClash2(existing, prefix, suffix) # finished return finalName def handleClash2(existing=[], prefix="", suffix=""): """ existing should be a case-insensitive list of all existing file names. >>> prefix = ("0" * 5) + "." >>> suffix = "." + ("0" * 10) >>> existing = [prefix + str(i) + suffix for i in range(100)] >>> e = list(existing) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... '00000.100.0000000000') True >>> e = list(existing) >>> e.remove(prefix + "1" + suffix) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... '00000.1.0000000000') True >>> e = list(existing) >>> e.remove(prefix + "2" + suffix) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... '00000.2.0000000000') True """ # calculate the longest possible string maxLength = maxFileNameLength - len(prefix) - len(suffix) maxValue = int("9" * maxLength) # try to find a number finalName = None counter = 1 while finalName is None: fullName = prefix + str(counter) + suffix if fullName.lower() not in existing: finalName = fullName break else: counter += 1 if counter >= maxValue: break # raise an error if nothing has been found if finalName is None: raise NameTranslationError("No unique name could be found.") # finished return finalName if __name__ == "__main__": import doctest doctest.testmod() PKaZZZKYZ�uufontTools/ufoLib/glifLib.py""" glifLib.py -- Generic module for reading and writing the .glif format. More info about the .glif format (GLyphInterchangeFormat) can be found here: http://unifiedfontobject.org The main class in this module is GlyphSet. It manages a set of .glif files in a folder. It offers two ways to read glyph data, and one way to write glyph data. See the class doc string for details. 
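
A minimal round trip, sketched with an in-memory filesystem so it does not
touch disk (``fs.memoryfs`` is part of the "fs" package this module already
depends on; the ``_Glyph`` container class is only for illustration):

>>> import fs.memoryfs
>>> glyphSet = GlyphSet(fs.memoryfs.MemoryFS())
>>> class _Glyph(object): pass
>>> g = _Glyph()
>>> g.width = 300
>>> glyphSet.writeGlyph("A", g)
>>> glyphSet.writeContents()
>>> glyphSet.keys()
['A']
>>> g2 = _Glyph()
>>> glyphSet.readGlyph("A", g2)
>>> g2.width
300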
""" from __future__ import annotations import logging import enum from warnings import warn from collections import OrderedDict import fs import fs.base import fs.errors import fs.osfs import fs.path from fontTools.misc.textTools import tobytes from fontTools.misc import plistlib from fontTools.pens.pointPen import AbstractPointPen, PointToSegmentPen from fontTools.ufoLib.errors import GlifLibError from fontTools.ufoLib.filenames import userNameToFileName from fontTools.ufoLib.validators import ( genericTypeValidator, colorValidator, guidelinesValidator, anchorsValidator, identifierValidator, imageValidator, glyphLibValidator, ) from fontTools.misc import etree from fontTools.ufoLib import _UFOBaseIO, UFOFormatVersion from fontTools.ufoLib.utils import numberTypes, _VersionTupleEnumMixin __all__ = [ "GlyphSet", "GlifLibError", "readGlyphFromString", "writeGlyphToString", "glyphNameToFileName", ] logger = logging.getLogger(__name__) # --------- # Constants # --------- CONTENTS_FILENAME = "contents.plist" LAYERINFO_FILENAME = "layerinfo.plist" class GLIFFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum): FORMAT_1_0 = (1, 0) FORMAT_2_0 = (2, 0) @classmethod def default(cls, ufoFormatVersion=None): if ufoFormatVersion is not None: return max(cls.supported_versions(ufoFormatVersion)) return super().default() @classmethod def supported_versions(cls, ufoFormatVersion=None): if ufoFormatVersion is None: # if ufo format unspecified, return all the supported GLIF formats return super().supported_versions() # else only return the GLIF formats supported by the given UFO format versions = {cls.FORMAT_1_0} if ufoFormatVersion >= UFOFormatVersion.FORMAT_3_0: versions.add(cls.FORMAT_2_0) return frozenset(versions) # workaround for py3.11, see https://github.com/fonttools/fonttools/pull/2655 GLIFFormatVersion.__str__ = _VersionTupleEnumMixin.__str__ # ------------ # Simple Glyph # ------------ class Glyph: """ Minimal glyph object. It has no glyph attributes until either the draw() or the drawPoints() method has been called. """ def __init__(self, glyphName, glyphSet): self.glyphName = glyphName self.glyphSet = glyphSet def draw(self, pen, outputImpliedClosingLine=False): """ Draw this glyph onto a *FontTools* Pen. """ pointPen = PointToSegmentPen( pen, outputImpliedClosingLine=outputImpliedClosingLine ) self.drawPoints(pointPen) def drawPoints(self, pointPen): """ Draw this glyph onto a PointPen. """ self.glyphSet.readGlyph(self.glyphName, self, pointPen) # --------- # Glyph Set # --------- class GlyphSet(_UFOBaseIO): """ GlyphSet manages a set of .glif files inside one directory. GlyphSet's constructor takes a path to an existing directory as it's first argument. Reading glyph data can either be done through the readGlyph() method, or by using GlyphSet's dictionary interface, where the keys are glyph names and the values are (very) simple glyph objects. To write a glyph to the glyph set, you use the writeGlyph() method. The simple glyph objects returned through the dict interface do not support writing, they are just a convenient way to get at the glyph data. """ glyphClass = Glyph def __init__( self, path, glyphNameToFileNameFunc=None, ufoFormatVersion=None, validateRead=True, validateWrite=True, expectContentsFile=False, ): """ 'path' should be a path (string) to an existing local directory, or an instance of fs.base.FS class. The optional 'glyphNameToFileNameFunc' argument must be a callback function that takes two arguments: a glyph name and a list of all existing filenames (if any exist). 
It should return a file name (including the .glif extension). The glyphNameToFileName function is called whenever a file name is created for a given glyph name. ``validateRead`` will validate read operations. Its default is ``True``. ``validateWrite`` will validate write operations. Its default is ``True``. ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is not found on the glyph set file system. This should be set to ``True`` if you are reading an existing UFO and ``False`` if you create a fresh glyph set. """ try: ufoFormatVersion = UFOFormatVersion(ufoFormatVersion) except ValueError as e: from fontTools.ufoLib.errors import UnsupportedUFOFormat raise UnsupportedUFOFormat( f"Unsupported UFO format: {ufoFormatVersion!r}" ) from e if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() if isinstance(path, str): try: filesystem = fs.osfs.OSFS(path) except fs.errors.CreateFailed: raise GlifLibError("No glyphs directory '%s'" % path) self._shouldClose = True elif isinstance(path, fs.base.FS): filesystem = path try: filesystem.check() except fs.errors.FilesystemClosed: raise GlifLibError("the filesystem '%s' is closed" % filesystem) self._shouldClose = False else: raise TypeError( "Expected a path string or fs object, found %s" % type(path).__name__ ) try: path = filesystem.getsyspath("/") except fs.errors.NoSysPath: # network or in-memory FS may not map to the local one path = str(filesystem) # 'dirName' is kept for backward compatibility only, but it's DEPRECATED # as it's not guaranteed that it maps to an existing OSFS directory. # Client could use the FS api via the `self.fs` attribute instead. self.dirName = fs.path.parts(path)[-1] self.fs = filesystem # if glyphSet contains no 'contents.plist', we consider it empty self._havePreviousFile = filesystem.exists(CONTENTS_FILENAME) if expectContentsFile and not self._havePreviousFile: raise GlifLibError(f"{CONTENTS_FILENAME} is missing.") # attribute kept for backward compatibility self.ufoFormatVersion = ufoFormatVersion.major self.ufoFormatVersionTuple = ufoFormatVersion if glyphNameToFileNameFunc is None: glyphNameToFileNameFunc = glyphNameToFileName self.glyphNameToFileName = glyphNameToFileNameFunc self._validateRead = validateRead self._validateWrite = validateWrite self._existingFileNames: set[str] | None = None self._reverseContents = None self.rebuildContents() def rebuildContents(self, validateRead=None): """ Rebuild the contents dict by loading contents.plist. ``validateRead`` will validate the data, by default it is set to the class's ``validateRead`` value, can be overridden. """ if validateRead is None: validateRead = self._validateRead contents = self._getPlist(CONTENTS_FILENAME, {}) # validate the contents if validateRead: invalidFormat = False if not isinstance(contents, dict): invalidFormat = True else: for name, fileName in contents.items(): if not isinstance(name, str): invalidFormat = True if not isinstance(fileName, str): invalidFormat = True elif not self.fs.exists(fileName): raise GlifLibError( "%s references a file that does not exist: %s" % (CONTENTS_FILENAME, fileName) ) if invalidFormat: raise GlifLibError("%s is not properly formatted" % CONTENTS_FILENAME) self.contents = contents self._existingFileNames = None self._reverseContents = None def getReverseContents(self): """ Return a reversed dict of self.contents, mapping file names to glyph names. 
This is primarily an aid for custom glyph name to file name schemes that want to make sure they don't generate duplicate file names. The file names are converted to lowercase so we can reliably check for duplicates that only differ in case, which is important for case-insensitive file systems. """ if self._reverseContents is None: d = {} for k, v in self.contents.items(): d[v.lower()] = k self._reverseContents = d return self._reverseContents def writeContents(self): """ Write the contents.plist file out to disk. Call this method when you're done writing glyphs. """ self._writePlist(CONTENTS_FILENAME, self.contents) # layer info def readLayerInfo(self, info, validateRead=None): """ ``validateRead`` will validate the data, by default it is set to the class's ``validateRead`` value, can be overridden. """ if validateRead is None: validateRead = self._validateRead infoDict = self._getPlist(LAYERINFO_FILENAME, {}) if validateRead: if not isinstance(infoDict, dict): raise GlifLibError("layerinfo.plist is not properly formatted.") infoDict = validateLayerInfoVersion3Data(infoDict) # populate the object for attr, value in infoDict.items(): try: setattr(info, attr, value) except AttributeError: raise GlifLibError( "The supplied layer info object does not support setting a necessary attribute (%s)." % attr ) def writeLayerInfo(self, info, validateWrite=None): """ ``validateWrite`` will validate the data, by default it is set to the class's ``validateWrite`` value, can be overridden. """ if validateWrite is None: validateWrite = self._validateWrite if self.ufoFormatVersionTuple.major < 3: raise GlifLibError( "layerinfo.plist is not allowed in UFO %d." % self.ufoFormatVersionTuple.major ) # gather data infoData = {} for attr in layerInfoVersion3ValueData.keys(): if hasattr(info, attr): try: value = getattr(info, attr) except AttributeError: raise GlifLibError( "The supplied info object does not support getting a necessary attribute (%s)." % attr ) if value is None or (attr == "lib" and not value): continue infoData[attr] = value if infoData: # validate if validateWrite: infoData = validateLayerInfoVersion3Data(infoData) # write file self._writePlist(LAYERINFO_FILENAME, infoData) elif self._havePreviousFile and self.fs.exists(LAYERINFO_FILENAME): # data empty, remove existing file self.fs.remove(LAYERINFO_FILENAME) def getGLIF(self, glyphName): """ Get the raw GLIF text for a given glyph name. This only works for GLIF files that are already on disk. This method is useful in situations when the raw XML needs to be read from a glyph set for a particular glyph before fully parsing it into an object structure via the readGlyph method. Raises KeyError if 'glyphName' is not in contents.plist, or GlifLibError if the file associated with can't be found. """ fileName = self.contents[glyphName] try: return self.fs.readbytes(fileName) except fs.errors.ResourceNotFound: raise GlifLibError( "The file '%s' associated with glyph '%s' in contents.plist " "does not exist on %s" % (fileName, glyphName, self.fs) ) def getGLIFModificationTime(self, glyphName): """ Returns the modification time for the GLIF file with 'glyphName', as a floating point number giving the number of seconds since the epoch. Return None if the associated file does not exist or the underlying filesystem does not support getting modified times. Raises KeyError if the glyphName is not in contents.plist. 
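
For example, a caller might use the timestamp to avoid re-parsing glyphs
that have not changed (``glyphSet`` and ``cachedTimes`` here are
illustrative names, not part of the API)::

    mtime = glyphSet.getGLIFModificationTime("A")
    if mtime is None or mtime != cachedTimes.get("A"):
        cachedTimes["A"] = mtime  # stale or unknown; re-read the glyph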
""" fileName = self.contents[glyphName] return self.getFileModificationTime(fileName) # reading/writing API def readGlyph(self, glyphName, glyphObject=None, pointPen=None, validate=None): """ Read a .glif file for 'glyphName' from the glyph set. The 'glyphObject' argument can be any kind of object (even None); the readGlyph() method will attempt to set the following attributes on it: width the advance width of the glyph height the advance height of the glyph unicodes a list of unicode values for this glyph note a string lib a dictionary containing custom data image a dictionary containing image data guidelines a list of guideline data dictionaries anchors a list of anchor data dictionaries All attributes are optional, in two ways: 1) An attribute *won't* be set if the .glif file doesn't contain data for it. 'glyphObject' will have to deal with default values itself. 2) If setting the attribute fails with an AttributeError (for example if the 'glyphObject' attribute is read- only), readGlyph() will not propagate that exception, but ignore that attribute. To retrieve outline information, you need to pass an object conforming to the PointPen protocol as the 'pointPen' argument. This argument may be None if you don't need the outline data. readGlyph() will raise KeyError if the glyph is not present in the glyph set. ``validate`` will validate the data, by default it is set to the class's ``validateRead`` value, can be overridden. """ if validate is None: validate = self._validateRead text = self.getGLIF(glyphName) try: tree = _glifTreeFromString(text) formatVersions = GLIFFormatVersion.supported_versions( self.ufoFormatVersionTuple ) _readGlyphFromTree( tree, glyphObject, pointPen, formatVersions=formatVersions, validate=validate, ) except GlifLibError as glifLibError: # Re-raise with a note that gives extra context, describing where # the error occurred. fileName = self.contents[glyphName] try: glifLocation = f"'{self.fs.getsyspath(fileName)}'" except fs.errors.NoSysPath: # Network or in-memory FS may not map to a local path, so use # the best string representation we have. glifLocation = f"'{fileName}' from '{str(self.fs)}'" glifLibError._add_note( f"The issue is in glyph '{glyphName}', located in {glifLocation}." ) raise def writeGlyph( self, glyphName, glyphObject=None, drawPointsFunc=None, formatVersion=None, validate=None, ): """ Write a .glif file for 'glyphName' to the glyph set. The 'glyphObject' argument can be any kind of object (even None); the writeGlyph() method will attempt to get the following attributes from it: width the advance width of the glyph height the advance height of the glyph unicodes a list of unicode values for this glyph note a string lib a dictionary containing custom data image a dictionary containing image data guidelines a list of guideline data dictionaries anchors a list of anchor data dictionaries All attributes are optional: if 'glyphObject' doesn't have the attribute, it will simply be skipped. To write outline data to the .glif file, writeGlyph() needs a function (any callable object actually) that will take one argument: an object that conforms to the PointPen protocol. The function will be called by writeGlyph(); it has to call the proper PointPen methods to transfer the outline to the .glif file. The GLIF format version will be chosen based on the ufoFormatVersion passed during the creation of this object. If a particular format version is desired, it can be passed with the formatVersion argument. 
The formatVersion argument accepts either a tuple of integers for (major, minor), or a single integer for the major digit only (with minor digit implied as 0). An UnsupportedGLIFFormat exception is raised if the requested GLIF formatVersion is not supported. ``validate`` will validate the data, by default it is set to the class's ``validateWrite`` value, can be overridden. """ if formatVersion is None: formatVersion = GLIFFormatVersion.default(self.ufoFormatVersionTuple) else: try: formatVersion = GLIFFormatVersion(formatVersion) except ValueError as e: from fontTools.ufoLib.errors import UnsupportedGLIFFormat raise UnsupportedGLIFFormat( f"Unsupported GLIF format version: {formatVersion!r}" ) from e if formatVersion not in GLIFFormatVersion.supported_versions( self.ufoFormatVersionTuple ): from fontTools.ufoLib.errors import UnsupportedGLIFFormat raise UnsupportedGLIFFormat( f"Unsupported GLIF format version ({formatVersion!s}) " f"for UFO format version {self.ufoFormatVersionTuple!s}." ) if validate is None: validate = self._validateWrite fileName = self.contents.get(glyphName) if fileName is None: if self._existingFileNames is None: self._existingFileNames = { fileName.lower() for fileName in self.contents.values() } fileName = self.glyphNameToFileName(glyphName, self._existingFileNames) self.contents[glyphName] = fileName self._existingFileNames.add(fileName.lower()) if self._reverseContents is not None: self._reverseContents[fileName.lower()] = glyphName data = _writeGlyphToBytes( glyphName, glyphObject, drawPointsFunc, formatVersion=formatVersion, validate=validate, ) if ( self._havePreviousFile and self.fs.exists(fileName) and data == self.fs.readbytes(fileName) ): return self.fs.writebytes(fileName, data) def deleteGlyph(self, glyphName): """Permanently delete the glyph from the glyph set on disk. Will raise KeyError if the glyph is not present in the glyph set. """ fileName = self.contents[glyphName] self.fs.remove(fileName) if self._existingFileNames is not None: self._existingFileNames.remove(fileName.lower()) if self._reverseContents is not None: del self._reverseContents[fileName.lower()] del self.contents[glyphName] # dict-like support def keys(self): return list(self.contents.keys()) def has_key(self, glyphName): return glyphName in self.contents __contains__ = has_key def __len__(self): return len(self.contents) def __getitem__(self, glyphName): if glyphName not in self.contents: raise KeyError(glyphName) return self.glyphClass(glyphName, self) # quickly fetch unicode values def getUnicodes(self, glyphNames=None): """ Return a dictionary that maps glyph names to lists containing the unicode value[s] for that glyph, if any. This parses the .glif files partially, so it is a lot faster than parsing all files completely. By default this checks all glyphs, but a subset can be passed with glyphNames. """ unicodes = {} if glyphNames is None: glyphNames = self.contents.keys() for glyphName in glyphNames: text = self.getGLIF(glyphName) unicodes[glyphName] = _fetchUnicodes(text) return unicodes def getComponentReferences(self, glyphNames=None): """ Return a dictionary that maps glyph names to lists containing the base glyph name of components in the glyph. This parses the .glif files partially, so it is a lot faster than parsing all files completely. By default this checks all glyphs, but a subset can be passed with glyphNames. 
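
For example, to find every glyph that uses "A" as a component base
(``glyphSet`` here is assumed to be an existing GlyphSet)::

    componentMap = glyphSet.getComponentReferences()
    usersOfA = sorted(name for name, bases in componentMap.items() if "A" in bases)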
""" components = {} if glyphNames is None: glyphNames = self.contents.keys() for glyphName in glyphNames: text = self.getGLIF(glyphName) components[glyphName] = _fetchComponentBases(text) return components def getImageReferences(self, glyphNames=None): """ Return a dictionary that maps glyph names to the file name of the image referenced by the glyph. This parses the .glif files partially, so it is a lot faster than parsing all files completely. By default this checks all glyphs, but a subset can be passed with glyphNames. """ images = {} if glyphNames is None: glyphNames = self.contents.keys() for glyphName in glyphNames: text = self.getGLIF(glyphName) images[glyphName] = _fetchImageFileName(text) return images def close(self): if self._shouldClose: self.fs.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() # ----------------------- # Glyph Name to File Name # ----------------------- def glyphNameToFileName(glyphName, existingFileNames): """ Wrapper around the userNameToFileName function in filenames.py Note that existingFileNames should be a set for large glyphsets or performance will suffer. """ if existingFileNames is None: existingFileNames = set() return userNameToFileName(glyphName, existing=existingFileNames, suffix=".glif") # ----------------------- # GLIF To and From String # ----------------------- def readGlyphFromString( aString, glyphObject=None, pointPen=None, formatVersions=None, validate=True, ): """ Read .glif data from a string into a glyph object. The 'glyphObject' argument can be any kind of object (even None); the readGlyphFromString() method will attempt to set the following attributes on it: width the advance width of the glyph height the advance height of the glyph unicodes a list of unicode values for this glyph note a string lib a dictionary containing custom data image a dictionary containing image data guidelines a list of guideline data dictionaries anchors a list of anchor data dictionaries All attributes are optional, in two ways: 1) An attribute *won't* be set if the .glif file doesn't contain data for it. 'glyphObject' will have to deal with default values itself. 2) If setting the attribute fails with an AttributeError (for example if the 'glyphObject' attribute is read- only), readGlyphFromString() will not propagate that exception, but ignore that attribute. To retrieve outline information, you need to pass an object conforming to the PointPen protocol as the 'pointPen' argument. This argument may be None if you don't need the outline data. The formatVersions optional argument define the GLIF format versions that are allowed to be read. The type is Optional[Iterable[Tuple[int, int], int]]. It can contain either integers (for the major versions to be allowed, with minor digits defaulting to 0), or tuples of integers to specify both (major, minor) versions. By default when formatVersions is None all the GLIF format versions currently defined are allowed to be read. ``validate`` will validate the read data. It is set to ``True`` by default. 
""" tree = _glifTreeFromString(aString) if formatVersions is None: validFormatVersions = GLIFFormatVersion.supported_versions() else: validFormatVersions, invalidFormatVersions = set(), set() for v in formatVersions: try: formatVersion = GLIFFormatVersion(v) except ValueError: invalidFormatVersions.add(v) else: validFormatVersions.add(formatVersion) if not validFormatVersions: raise ValueError( "None of the requested GLIF formatVersions are supported: " f"{formatVersions!r}" ) _readGlyphFromTree( tree, glyphObject, pointPen, formatVersions=validFormatVersions, validate=validate, ) def _writeGlyphToBytes( glyphName, glyphObject=None, drawPointsFunc=None, writer=None, formatVersion=None, validate=True, ): """Return .glif data for a glyph as a UTF-8 encoded bytes string.""" try: formatVersion = GLIFFormatVersion(formatVersion) except ValueError: from fontTools.ufoLib.errors import UnsupportedGLIFFormat raise UnsupportedGLIFFormat( "Unsupported GLIF format version: {formatVersion!r}" ) # start if validate and not isinstance(glyphName, str): raise GlifLibError("The glyph name is not properly formatted.") if validate and len(glyphName) == 0: raise GlifLibError("The glyph name is empty.") glyphAttrs = OrderedDict( [("name", glyphName), ("format", repr(formatVersion.major))] ) if formatVersion.minor != 0: glyphAttrs["formatMinor"] = repr(formatVersion.minor) root = etree.Element("glyph", glyphAttrs) identifiers = set() # advance _writeAdvance(glyphObject, root, validate) # unicodes if getattr(glyphObject, "unicodes", None): _writeUnicodes(glyphObject, root, validate) # note if getattr(glyphObject, "note", None): _writeNote(glyphObject, root, validate) # image if formatVersion.major >= 2 and getattr(glyphObject, "image", None): _writeImage(glyphObject, root, validate) # guidelines if formatVersion.major >= 2 and getattr(glyphObject, "guidelines", None): _writeGuidelines(glyphObject, root, identifiers, validate) # anchors anchors = getattr(glyphObject, "anchors", None) if formatVersion.major >= 2 and anchors: _writeAnchors(glyphObject, root, identifiers, validate) # outline if drawPointsFunc is not None: outline = etree.SubElement(root, "outline") pen = GLIFPointPen(outline, identifiers=identifiers, validate=validate) drawPointsFunc(pen) if formatVersion.major == 1 and anchors: _writeAnchorsFormat1(pen, anchors, validate) # prevent lxml from writing self-closing tags if not len(outline): outline.text = "\n " # lib if getattr(glyphObject, "lib", None): _writeLib(glyphObject, root, validate) # return the text data = etree.tostring( root, encoding="UTF-8", xml_declaration=True, pretty_print=True ) return data def writeGlyphToString( glyphName, glyphObject=None, drawPointsFunc=None, formatVersion=None, validate=True, ): """ Return .glif data for a glyph as a string. The XML declaration's encoding is always set to "UTF-8". The 'glyphObject' argument can be any kind of object (even None); the writeGlyphToString() method will attempt to get the following attributes from it: width the advance width of the glyph height the advance height of the glyph unicodes a list of unicode values for this glyph note a string lib a dictionary containing custom data image a dictionary containing image data guidelines a list of guideline data dictionaries anchors a list of anchor data dictionaries All attributes are optional: if 'glyphObject' doesn't have the attribute, it will simply be skipped. 
    To write outline data to the .glif file, writeGlyphToString() needs
    a function (any callable object actually) that will take one
    argument: an object that conforms to the PointPen protocol. The
    function will be called by writeGlyphToString(); it has to call the
    proper PointPen methods to transfer the outline to the .glif file.

    The GLIF format version can be specified with the formatVersion
    argument. This accepts either a tuple of integers for (major, minor),
    or a single integer for the major digit only (with minor digit implied
    as 0). By default when formatVersion is None the latest GLIF format
    version will be used; currently it's 2.0, which is equivalent to
    formatVersion=(2, 0).

    An UnsupportedGLIFFormat exception is raised if the requested GLIF
    formatVersion is not supported.

    ``validate`` will validate the written data. It is set to ``True``
    by default.
    """
    data = _writeGlyphToBytes(
        glyphName,
        glyphObject=glyphObject,
        drawPointsFunc=drawPointsFunc,
        formatVersion=formatVersion,
        validate=validate,
    )
    return data.decode("utf-8")


def _writeAdvance(glyphObject, element, validate):
    width = getattr(glyphObject, "width", None)
    if width is not None:
        if validate and not isinstance(width, numberTypes):
            raise GlifLibError("width attribute must be int or float")
        if width == 0:
            width = None
    height = getattr(glyphObject, "height", None)
    if height is not None:
        if validate and not isinstance(height, numberTypes):
            raise GlifLibError("height attribute must be int or float")
        if height == 0:
            height = None
    if width is not None and height is not None:
        etree.SubElement(
            element,
            "advance",
            OrderedDict([("height", repr(height)), ("width", repr(width))]),
        )
    elif width is not None:
        etree.SubElement(element, "advance", dict(width=repr(width)))
    elif height is not None:
        etree.SubElement(element, "advance", dict(height=repr(height)))


def _writeUnicodes(glyphObject, element, validate):
    unicodes = getattr(glyphObject, "unicodes", None)
    if validate and isinstance(unicodes, int):
        unicodes = [unicodes]
    seen = set()
    for code in unicodes:
        if validate and not isinstance(code, int):
            raise GlifLibError("unicode values must be int")
        if code in seen:
            continue
        seen.add(code)
        hexCode = "%04X" % code
        etree.SubElement(element, "unicode", dict(hex=hexCode))


def _writeNote(glyphObject, element, validate):
    note = getattr(glyphObject, "note", None)
    if validate and not isinstance(note, str):
        raise GlifLibError("note attribute must be str")
    note = note.strip()
    note = "\n" + note + "\n"
    etree.SubElement(element, "note").text = note


def _writeImage(glyphObject, element, validate):
    image = getattr(glyphObject, "image", None)
    if validate and not imageValidator(image):
        raise GlifLibError(
            "image attribute must be a dict or dict-like object with the proper structure."
        )
    attrs = OrderedDict([("fileName", image["fileName"])])
    for attr, default in _transformationInfo:
        value = image.get(attr, default)
        if value != default:
            attrs[attr] = repr(value)
    color = image.get("color")
    if color is not None:
        attrs["color"] = color
    etree.SubElement(element, "image", attrs)


def _writeGuidelines(glyphObject, element, identifiers, validate):
    guidelines = getattr(glyphObject, "guidelines", [])
    if validate and not guidelinesValidator(guidelines):
        raise GlifLibError("guidelines attribute does not have the proper structure.")
    for guideline in guidelines:
        attrs = OrderedDict()
        x = guideline.get("x")
        if x is not None:
            attrs["x"] = repr(x)
        y = guideline.get("y")
        if y is not None:
            attrs["y"] = repr(y)
        angle = guideline.get("angle")
        if angle is not None:
            attrs["angle"] = repr(angle)
        name = guideline.get("name")
        if name is not None:
            attrs["name"] = name
        color = guideline.get("color")
        if color is not None:
            attrs["color"] = color
        identifier = guideline.get("identifier")
        if identifier is not None:
            if validate and identifier in identifiers:
                raise GlifLibError("identifier used more than once: %s" % identifier)
            attrs["identifier"] = identifier
            identifiers.add(identifier)
        etree.SubElement(element, "guideline", attrs)


def _writeAnchorsFormat1(pen, anchors, validate):
    if validate and not anchorsValidator(anchors):
        raise GlifLibError("anchors attribute does not have the proper structure.")
    for anchor in anchors:
        attrs = {}
        x = anchor["x"]
        attrs["x"] = repr(x)
        y = anchor["y"]
        attrs["y"] = repr(y)
        name = anchor.get("name")
        if name is not None:
            attrs["name"] = name
        pen.beginPath()
        pen.addPoint((x, y), segmentType="move", name=name)
        pen.endPath()


def _writeAnchors(glyphObject, element, identifiers, validate):
    anchors = getattr(glyphObject, "anchors", [])
    if validate and not anchorsValidator(anchors):
        raise GlifLibError("anchors attribute does not have the proper structure.")
    for anchor in anchors:
        attrs = OrderedDict()
        x = anchor["x"]
        attrs["x"] = repr(x)
        y = anchor["y"]
        attrs["y"] = repr(y)
        name = anchor.get("name")
        if name is not None:
            attrs["name"] = name
        color = anchor.get("color")
        if color is not None:
            attrs["color"] = color
        identifier = anchor.get("identifier")
        if identifier is not None:
            if validate and identifier in identifiers:
                raise GlifLibError("identifier used more than once: %s" % identifier)
            attrs["identifier"] = identifier
            identifiers.add(identifier)
        etree.SubElement(element, "anchor", attrs)


def _writeLib(glyphObject, element, validate):
    lib = getattr(glyphObject, "lib", None)
    if not lib:
        # don't write empty lib
        return
    if validate:
        valid, message = glyphLibValidator(lib)
        if not valid:
            raise GlifLibError(message)
    if not isinstance(lib, dict):
        lib = dict(lib)
    # plist inside GLIF begins with 2 levels of indentation
    e = plistlib.totree(lib, indent_level=2)
    etree.SubElement(element, "lib").append(e)


# -----------------------
# layerinfo.plist Support
# -----------------------

layerInfoVersion3ValueData = {
    "color": dict(type=str, valueValidator=colorValidator),
    "lib": dict(type=dict, valueValidator=genericTypeValidator),
}


def validateLayerInfoVersion3ValueForAttribute(attr, value):
    """
    This performs very basic validation of the value for attribute
    following the UFO 3 layerinfo.plist specification. The results of
    this should not be interpreted as *correct* for the font that they
    are part of. This merely indicates that the value is of the proper
    type and, where the specification defines a set range of possible
    values for an attribute, that the value is in the accepted range.
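
    For example (using the attributes defined in
    layerInfoVersion3ValueData above; unknown attributes are rejected):

    >>> validateLayerInfoVersion3ValueForAttribute("lib", {})
    True
    >>> validateLayerInfoVersion3ValueForAttribute("unknownAttribute", 1)
    False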
""" if attr not in layerInfoVersion3ValueData: return False dataValidationDict = layerInfoVersion3ValueData[attr] valueType = dataValidationDict.get("type") validator = dataValidationDict.get("valueValidator") valueOptions = dataValidationDict.get("valueOptions") # have specific options for the validator if valueOptions is not None: isValidValue = validator(value, valueOptions) # no specific options else: if validator == genericTypeValidator: isValidValue = validator(value, valueType) else: isValidValue = validator(value) return isValidValue def validateLayerInfoVersion3Data(infoData): """ This performs very basic validation of the value for infoData following the UFO 3 layerinfo.plist specification. The results of this should not be interpretted as *correct* for the font that they are part of. This merely indicates that the values are of the proper type and, where the specification defines a set range of possible values for an attribute, that the value is in the accepted range. """ for attr, value in infoData.items(): if attr not in layerInfoVersion3ValueData: raise GlifLibError("Unknown attribute %s." % attr) isValidValue = validateLayerInfoVersion3ValueForAttribute(attr, value) if not isValidValue: raise GlifLibError(f"Invalid value for attribute {attr} ({value!r}).") return infoData # ----------------- # GLIF Tree Support # ----------------- def _glifTreeFromFile(aFile): if etree._have_lxml: tree = etree.parse(aFile, parser=etree.XMLParser(remove_comments=True)) else: tree = etree.parse(aFile) root = tree.getroot() if root.tag != "glyph": raise GlifLibError("The GLIF is not properly formatted.") if root.text and root.text.strip() != "": raise GlifLibError("Invalid GLIF structure.") return root def _glifTreeFromString(aString): data = tobytes(aString, encoding="utf-8") try: if etree._have_lxml: root = etree.fromstring(data, parser=etree.XMLParser(remove_comments=True)) else: root = etree.fromstring(data) except Exception as etree_exception: raise GlifLibError("GLIF contains invalid XML.") from etree_exception if root.tag != "glyph": raise GlifLibError("The GLIF is not properly formatted.") if root.text and root.text.strip() != "": raise GlifLibError("Invalid GLIF structure.") return root def _readGlyphFromTree( tree, glyphObject=None, pointPen=None, formatVersions=GLIFFormatVersion.supported_versions(), validate=True, ): # check the format version formatVersionMajor = tree.get("format") if validate and formatVersionMajor is None: raise GlifLibError("Unspecified format version in GLIF.") formatVersionMinor = tree.get("formatMinor", 0) try: formatVersion = GLIFFormatVersion( (int(formatVersionMajor), int(formatVersionMinor)) ) except ValueError as e: msg = "Unsupported GLIF format: %s.%s" % ( formatVersionMajor, formatVersionMinor, ) if validate: from fontTools.ufoLib.errors import UnsupportedGLIFFormat raise UnsupportedGLIFFormat(msg) from e # warn but continue using the latest supported format formatVersion = GLIFFormatVersion.default() logger.warning( "%s. Assuming the latest supported version (%s). 
" "Some data may be skipped or parsed incorrectly.", msg, formatVersion, ) if validate and formatVersion not in formatVersions: raise GlifLibError(f"Forbidden GLIF format version: {formatVersion!s}") try: readGlyphFromTree = _READ_GLYPH_FROM_TREE_FUNCS[formatVersion] except KeyError: raise NotImplementedError(formatVersion) readGlyphFromTree( tree=tree, glyphObject=glyphObject, pointPen=pointPen, validate=validate, formatMinor=formatVersion.minor, ) def _readGlyphFromTreeFormat1( tree, glyphObject=None, pointPen=None, validate=None, **kwargs ): # get the name _readName(glyphObject, tree, validate) # populate the sub elements unicodes = [] haveSeenAdvance = haveSeenOutline = haveSeenLib = haveSeenNote = False for element in tree: if element.tag == "outline": if validate: if haveSeenOutline: raise GlifLibError("The outline element occurs more than once.") if element.attrib: raise GlifLibError( "The outline element contains unknown attributes." ) if element.text and element.text.strip() != "": raise GlifLibError("Invalid outline structure.") haveSeenOutline = True buildOutlineFormat1(glyphObject, pointPen, element, validate) elif glyphObject is None: continue elif element.tag == "advance": if validate and haveSeenAdvance: raise GlifLibError("The advance element occurs more than once.") haveSeenAdvance = True _readAdvance(glyphObject, element) elif element.tag == "unicode": try: v = element.get("hex") v = int(v, 16) if v not in unicodes: unicodes.append(v) except ValueError: raise GlifLibError( "Illegal value for hex attribute of unicode element." ) elif element.tag == "note": if validate and haveSeenNote: raise GlifLibError("The note element occurs more than once.") haveSeenNote = True _readNote(glyphObject, element) elif element.tag == "lib": if validate and haveSeenLib: raise GlifLibError("The lib element occurs more than once.") haveSeenLib = True _readLib(glyphObject, element, validate) else: raise GlifLibError("Unknown element in GLIF: %s" % element) # set the collected unicodes if unicodes: _relaxedSetattr(glyphObject, "unicodes", unicodes) def _readGlyphFromTreeFormat2( tree, glyphObject=None, pointPen=None, validate=None, formatMinor=0 ): # get the name _readName(glyphObject, tree, validate) # populate the sub elements unicodes = [] guidelines = [] anchors = [] haveSeenAdvance = haveSeenImage = haveSeenOutline = haveSeenLib = haveSeenNote = ( False ) identifiers = set() for element in tree: if element.tag == "outline": if validate: if haveSeenOutline: raise GlifLibError("The outline element occurs more than once.") if element.attrib: raise GlifLibError( "The outline element contains unknown attributes." ) if element.text and element.text.strip() != "": raise GlifLibError("Invalid outline structure.") haveSeenOutline = True if pointPen is not None: buildOutlineFormat2( glyphObject, pointPen, element, identifiers, validate ) elif glyphObject is None: continue elif element.tag == "advance": if validate and haveSeenAdvance: raise GlifLibError("The advance element occurs more than once.") haveSeenAdvance = True _readAdvance(glyphObject, element) elif element.tag == "unicode": try: v = element.get("hex") v = int(v, 16) if v not in unicodes: unicodes.append(v) except ValueError: raise GlifLibError( "Illegal value for hex attribute of unicode element." 
) elif element.tag == "guideline": if validate and len(element): raise GlifLibError("Unknown children in guideline element.") attrib = dict(element.attrib) for attr in ("x", "y", "angle"): if attr in attrib: attrib[attr] = _number(attrib[attr]) guidelines.append(attrib) elif element.tag == "anchor": if validate and len(element): raise GlifLibError("Unknown children in anchor element.") attrib = dict(element.attrib) for attr in ("x", "y"): if attr in element.attrib: attrib[attr] = _number(attrib[attr]) anchors.append(attrib) elif element.tag == "image": if validate: if haveSeenImage: raise GlifLibError("The image element occurs more than once.") if len(element): raise GlifLibError("Unknown children in image element.") haveSeenImage = True _readImage(glyphObject, element, validate) elif element.tag == "note": if validate and haveSeenNote: raise GlifLibError("The note element occurs more than once.") haveSeenNote = True _readNote(glyphObject, element) elif element.tag == "lib": if validate and haveSeenLib: raise GlifLibError("The lib element occurs more than once.") haveSeenLib = True _readLib(glyphObject, element, validate) else: raise GlifLibError("Unknown element in GLIF: %s" % element) # set the collected unicodes if unicodes: _relaxedSetattr(glyphObject, "unicodes", unicodes) # set the collected guidelines if guidelines: if validate and not guidelinesValidator(guidelines, identifiers): raise GlifLibError("The guidelines are improperly formatted.") _relaxedSetattr(glyphObject, "guidelines", guidelines) # set the collected anchors if anchors: if validate and not anchorsValidator(anchors, identifiers): raise GlifLibError("The anchors are improperly formatted.") _relaxedSetattr(glyphObject, "anchors", anchors) _READ_GLYPH_FROM_TREE_FUNCS = { GLIFFormatVersion.FORMAT_1_0: _readGlyphFromTreeFormat1, GLIFFormatVersion.FORMAT_2_0: _readGlyphFromTreeFormat2, } def _readName(glyphObject, root, validate): glyphName = root.get("name") if validate and not glyphName: raise GlifLibError("Empty glyph name in GLIF.") if glyphName and glyphObject is not None: _relaxedSetattr(glyphObject, "name", glyphName) def _readAdvance(glyphObject, advance): width = _number(advance.get("width", 0)) _relaxedSetattr(glyphObject, "width", width) height = _number(advance.get("height", 0)) _relaxedSetattr(glyphObject, "height", height) def _readNote(glyphObject, note): lines = note.text.split("\n") note = "\n".join(line.strip() for line in lines if line.strip()) _relaxedSetattr(glyphObject, "note", note) def _readLib(glyphObject, lib, validate): assert len(lib) == 1 child = lib[0] plist = plistlib.fromtree(child) if validate: valid, message = glyphLibValidator(plist) if not valid: raise GlifLibError(message) _relaxedSetattr(glyphObject, "lib", plist) def _readImage(glyphObject, image, validate): imageData = dict(image.attrib) for attr, default in _transformationInfo: value = imageData.get(attr, default) imageData[attr] = _number(value) if validate and not imageValidator(imageData): raise GlifLibError("The image element is not properly formatted.") _relaxedSetattr(glyphObject, "image", imageData) # ---------------- # GLIF to PointPen # ---------------- contourAttributesFormat2 = {"identifier"} componentAttributesFormat1 = { "base", "xScale", "xyScale", "yxScale", "yScale", "xOffset", "yOffset", } componentAttributesFormat2 = componentAttributesFormat1 | {"identifier"} pointAttributesFormat1 = {"x", "y", "type", "smooth", "name"} pointAttributesFormat2 = pointAttributesFormat1 | {"identifier"} pointSmoothOptions = {"no", 
"yes"} pointTypeOptions = {"move", "line", "offcurve", "curve", "qcurve"} # format 1 def buildOutlineFormat1(glyphObject, pen, outline, validate): anchors = [] for element in outline: if element.tag == "contour": if len(element) == 1: point = element[0] if point.tag == "point": anchor = _buildAnchorFormat1(point, validate) if anchor is not None: anchors.append(anchor) continue if pen is not None: _buildOutlineContourFormat1(pen, element, validate) elif element.tag == "component": if pen is not None: _buildOutlineComponentFormat1(pen, element, validate) else: raise GlifLibError("Unknown element in outline element: %s" % element) if glyphObject is not None and anchors: if validate and not anchorsValidator(anchors): raise GlifLibError("GLIF 1 anchors are not properly formatted.") _relaxedSetattr(glyphObject, "anchors", anchors) def _buildAnchorFormat1(point, validate): if point.get("type") != "move": return None name = point.get("name") if name is None: return None x = point.get("x") y = point.get("y") if validate and x is None: raise GlifLibError("Required x attribute is missing in point element.") if validate and y is None: raise GlifLibError("Required y attribute is missing in point element.") x = _number(x) y = _number(y) anchor = dict(x=x, y=y, name=name) return anchor def _buildOutlineContourFormat1(pen, contour, validate): if validate and contour.attrib: raise GlifLibError("Unknown attributes in contour element.") pen.beginPath() if len(contour): massaged = _validateAndMassagePointStructures( contour, pointAttributesFormat1, openContourOffCurveLeniency=True, validate=validate, ) _buildOutlinePointsFormat1(pen, massaged) pen.endPath() def _buildOutlinePointsFormat1(pen, contour): for point in contour: x = point["x"] y = point["y"] segmentType = point["segmentType"] smooth = point["smooth"] name = point["name"] pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name) def _buildOutlineComponentFormat1(pen, component, validate): if validate: if len(component): raise GlifLibError("Unknown child elements of component element.") for attr in component.attrib.keys(): if attr not in componentAttributesFormat1: raise GlifLibError("Unknown attribute in component element: %s" % attr) baseGlyphName = component.get("base") if validate and baseGlyphName is None: raise GlifLibError("The base attribute is not defined in the component.") transformation = [] for attr, default in _transformationInfo: value = component.get(attr) if value is None: value = default else: value = _number(value) transformation.append(value) pen.addComponent(baseGlyphName, tuple(transformation)) # format 2 def buildOutlineFormat2(glyphObject, pen, outline, identifiers, validate): for element in outline: if element.tag == "contour": _buildOutlineContourFormat2(pen, element, identifiers, validate) elif element.tag == "component": _buildOutlineComponentFormat2(pen, element, identifiers, validate) else: raise GlifLibError("Unknown element in outline element: %s" % element.tag) def _buildOutlineContourFormat2(pen, contour, identifiers, validate): if validate: for attr in contour.attrib.keys(): if attr not in contourAttributesFormat2: raise GlifLibError("Unknown attribute in contour element: %s" % attr) identifier = contour.get("identifier") if identifier is not None: if validate: if identifier in identifiers: raise GlifLibError( "The identifier %s is used more than once." % identifier ) if not identifierValidator(identifier): raise GlifLibError( "The contour identifier %s is not valid." 
% identifier ) identifiers.add(identifier) try: pen.beginPath(identifier=identifier) except TypeError: pen.beginPath() warn( "The beginPath method needs an identifier kwarg. The contour's identifier value has been discarded.", DeprecationWarning, ) if len(contour): massaged = _validateAndMassagePointStructures( contour, pointAttributesFormat2, validate=validate ) _buildOutlinePointsFormat2(pen, massaged, identifiers, validate) pen.endPath() def _buildOutlinePointsFormat2(pen, contour, identifiers, validate): for point in contour: x = point["x"] y = point["y"] segmentType = point["segmentType"] smooth = point["smooth"] name = point["name"] identifier = point.get("identifier") if identifier is not None: if validate: if identifier in identifiers: raise GlifLibError( "The identifier %s is used more than once." % identifier ) if not identifierValidator(identifier): raise GlifLibError("The identifier %s is not valid." % identifier) identifiers.add(identifier) try: pen.addPoint( (x, y), segmentType=segmentType, smooth=smooth, name=name, identifier=identifier, ) except TypeError: pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name) warn( "The addPoint method needs an identifier kwarg. The point's identifier value has been discarded.", DeprecationWarning, ) def _buildOutlineComponentFormat2(pen, component, identifiers, validate): if validate: if len(component): raise GlifLibError("Unknown child elements of component element.") for attr in component.attrib.keys(): if attr not in componentAttributesFormat2: raise GlifLibError("Unknown attribute in component element: %s" % attr) baseGlyphName = component.get("base") if validate and baseGlyphName is None: raise GlifLibError("The base attribute is not defined in the component.") transformation = [] for attr, default in _transformationInfo: value = component.get(attr) if value is None: value = default else: value = _number(value) transformation.append(value) identifier = component.get("identifier") if identifier is not None: if validate: if identifier in identifiers: raise GlifLibError( "The identifier %s is used more than once." % identifier ) if validate and not identifierValidator(identifier): raise GlifLibError("The identifier %s is not valid." % identifier) identifiers.add(identifier) try: pen.addComponent(baseGlyphName, tuple(transformation), identifier=identifier) except TypeError: pen.addComponent(baseGlyphName, tuple(transformation)) warn( "The addComponent method needs an identifier kwarg. The component's identifier value has been discarded.", DeprecationWarning, ) # all formats def _validateAndMassagePointStructures( contour, pointAttributes, openContourOffCurveLeniency=False, validate=True ): if not len(contour): return # store some data for later validation lastOnCurvePoint = None haveOffCurvePoint = False # validate and massage the individual point elements massaged = [] for index, element in enumerate(contour): # not <point> if element.tag != "point": raise GlifLibError( "Unknown child element (%s) of contour element." 
% element.tag ) point = dict(element.attrib) massaged.append(point) if validate: # unknown attributes for attr in point.keys(): if attr not in pointAttributes: raise GlifLibError("Unknown attribute in point element: %s" % attr) # search for unknown children if len(element): raise GlifLibError("Unknown child elements in point element.") # x and y are required for attr in ("x", "y"): try: point[attr] = _number(point[attr]) except KeyError as e: raise GlifLibError( f"Required {attr} attribute is missing in point element." ) from e # segment type pointType = point.pop("type", "offcurve") if validate and pointType not in pointTypeOptions: raise GlifLibError("Unknown point type: %s" % pointType) if pointType == "offcurve": pointType = None point["segmentType"] = pointType if pointType is None: haveOffCurvePoint = True else: lastOnCurvePoint = index # move can only occur as the first point if validate and pointType == "move" and index != 0: raise GlifLibError( "A move point occurs after the first point in the contour." ) # smooth is optional smooth = point.get("smooth", "no") if validate and smooth is not None: if smooth not in pointSmoothOptions: raise GlifLibError("Unknown point smooth value: %s" % smooth) smooth = smooth == "yes" point["smooth"] = smooth # smooth can only be applied to curve and qcurve if validate and smooth and pointType is None: raise GlifLibError("smooth attribute set in an offcurve point.") # name is optional if "name" not in element.attrib: point["name"] = None if openContourOffCurveLeniency: # remove offcurves that precede a move. this is technically illegal, # but we let it slide because there are fonts out there in the wild like this. if massaged[0]["segmentType"] == "move": count = 0 for point in reversed(massaged): if point["segmentType"] is None: count += 1 else: break if count: massaged = massaged[:-count] # validate the off-curves in the segments if validate and haveOffCurvePoint and lastOnCurvePoint is not None: # we only care about how many offCurves there are before an onCurve # filter out the trailing offCurves offCurvesCount = len(massaged) - 1 - lastOnCurvePoint for point in massaged: segmentType = point["segmentType"] if segmentType is None: offCurvesCount += 1 else: if offCurvesCount: # move and line can't be preceded by off-curves if segmentType == "move": # this will have been filtered out already raise GlifLibError("move can not have an offcurve.") elif segmentType == "line": raise GlifLibError("line can not have an offcurve.") elif segmentType == "curve": if offCurvesCount > 2: raise GlifLibError("Too many offcurves defined for curve.") elif segmentType == "qcurve": pass else: # unknown segment type. it'll be caught later. pass offCurvesCount = 0 return massaged # --------------------- # Misc Helper Functions # --------------------- def _relaxedSetattr(object, attr, value): try: setattr(object, attr, value) except AttributeError: pass def _number(s): """ Given a numeric string, return an integer or a float, whichever the string indicates. _number("1") will return the integer 1, _number("1.0") will return the float 1.0. >>> _number("1") 1 >>> _number("1.0") 1.0 >>> _number("a") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... GlifLibError: Could not convert a to an int or float. """ try: n = int(s) return n except ValueError: pass try: n = float(s) return n except ValueError: raise GlifLibError("Could not convert %s to an int or float." 
% s) # -------------------- # Rapid Value Fetching # -------------------- # base class _DoneParsing(Exception): pass class _BaseParser: def __init__(self): self._elementStack = [] def parse(self, text): from xml.parsers.expat import ParserCreate parser = ParserCreate() parser.StartElementHandler = self.startElementHandler parser.EndElementHandler = self.endElementHandler parser.Parse(text) def startElementHandler(self, name, attrs): self._elementStack.append(name) def endElementHandler(self, name): other = self._elementStack.pop(-1) assert other == name # unicodes def _fetchUnicodes(glif): """ Get a list of unicodes listed in glif. """ parser = _FetchUnicodesParser() parser.parse(glif) return parser.unicodes class _FetchUnicodesParser(_BaseParser): def __init__(self): self.unicodes = [] super().__init__() def startElementHandler(self, name, attrs): if ( name == "unicode" and self._elementStack and self._elementStack[-1] == "glyph" ): value = attrs.get("hex") if value is not None: try: value = int(value, 16) if value not in self.unicodes: self.unicodes.append(value) except ValueError: pass super().startElementHandler(name, attrs) # image def _fetchImageFileName(glif): """ The image file name (if any) from glif. """ parser = _FetchImageFileNameParser() try: parser.parse(glif) except _DoneParsing: pass return parser.fileName class _FetchImageFileNameParser(_BaseParser): def __init__(self): self.fileName = None super().__init__() def startElementHandler(self, name, attrs): if name == "image" and self._elementStack and self._elementStack[-1] == "glyph": self.fileName = attrs.get("fileName") raise _DoneParsing super().startElementHandler(name, attrs) # component references def _fetchComponentBases(glif): """ Get a list of component base glyphs listed in glif. """ parser = _FetchComponentBasesParser() try: parser.parse(glif) except _DoneParsing: pass return list(parser.bases) class _FetchComponentBasesParser(_BaseParser): def __init__(self): self.bases = [] super().__init__() def startElementHandler(self, name, attrs): if ( name == "component" and self._elementStack and self._elementStack[-1] == "outline" ): base = attrs.get("base") if base is not None: self.bases.append(base) super().startElementHandler(name, attrs) def endElementHandler(self, name): if name == "outline": raise _DoneParsing super().endElementHandler(name) # -------------- # GLIF Point Pen # -------------- _transformationInfo = [ # field name, default value ("xScale", 1), ("xyScale", 0), ("yxScale", 0), ("yScale", 1), ("xOffset", 0), ("yOffset", 0), ] class GLIFPointPen(AbstractPointPen): """ Helper class using the PointPen protocol to write the <outline> part of .glif files. 
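
A short sketch of typical use, writing one open contour into an
<outline> element:

>>> outline = etree.Element("outline")
>>> pen = GLIFPointPen(outline)
>>> pen.beginPath()
>>> pen.addPoint((0, 0), segmentType="move")
>>> pen.addPoint((100, 0), segmentType="line")
>>> pen.endPath()
>>> len(outline[0])
2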
""" def __init__(self, element, formatVersion=None, identifiers=None, validate=True): if identifiers is None: identifiers = set() self.formatVersion = GLIFFormatVersion(formatVersion) self.identifiers = identifiers self.outline = element self.contour = None self.prevOffCurveCount = 0 self.prevPointTypes = [] self.validate = validate def beginPath(self, identifier=None, **kwargs): attrs = OrderedDict() if identifier is not None and self.formatVersion.major >= 2: if self.validate: if identifier in self.identifiers: raise GlifLibError( "identifier used more than once: %s" % identifier ) if not identifierValidator(identifier): raise GlifLibError( "identifier not formatted properly: %s" % identifier ) attrs["identifier"] = identifier self.identifiers.add(identifier) self.contour = etree.SubElement(self.outline, "contour", attrs) self.prevOffCurveCount = 0 def endPath(self): if self.prevPointTypes and self.prevPointTypes[0] == "move": if self.validate and self.prevPointTypes[-1] == "offcurve": raise GlifLibError("open contour has loose offcurve point") # prevent lxml from writing self-closing tags if not len(self.contour): self.contour.text = "\n " self.contour = None self.prevPointType = None self.prevOffCurveCount = 0 self.prevPointTypes = [] def addPoint( self, pt, segmentType=None, smooth=None, name=None, identifier=None, **kwargs ): attrs = OrderedDict() # coordinates if pt is not None: if self.validate: for coord in pt: if not isinstance(coord, numberTypes): raise GlifLibError("coordinates must be int or float") attrs["x"] = repr(pt[0]) attrs["y"] = repr(pt[1]) # segment type if segmentType == "offcurve": segmentType = None if self.validate: if segmentType == "move" and self.prevPointTypes: raise GlifLibError( "move occurs after a point has already been added to the contour." ) if ( segmentType in ("move", "line") and self.prevPointTypes and self.prevPointTypes[-1] == "offcurve" ): raise GlifLibError("offcurve occurs before %s point." 
% segmentType) if segmentType == "curve" and self.prevOffCurveCount > 2: raise GlifLibError("too many offcurve points before curve point.") if segmentType is not None: attrs["type"] = segmentType else: segmentType = "offcurve" if segmentType == "offcurve": self.prevOffCurveCount += 1 else: self.prevOffCurveCount = 0 self.prevPointTypes.append(segmentType) # smooth if smooth: if self.validate and segmentType == "offcurve": raise GlifLibError("can't set smooth in an offcurve point.") attrs["smooth"] = "yes" # name if name is not None: attrs["name"] = name # identifier if identifier is not None and self.formatVersion.major >= 2: if self.validate: if identifier in self.identifiers: raise GlifLibError( "identifier used more than once: %s" % identifier ) if not identifierValidator(identifier): raise GlifLibError( "identifier not formatted properly: %s" % identifier ) attrs["identifier"] = identifier self.identifiers.add(identifier) etree.SubElement(self.contour, "point", attrs) def addComponent(self, glyphName, transformation, identifier=None, **kwargs): attrs = OrderedDict([("base", glyphName)]) for (attr, default), value in zip(_transformationInfo, transformation): if self.validate and not isinstance(value, numberTypes): raise GlifLibError("transformation values must be int or float") if value != default: attrs[attr] = repr(value) if identifier is not None and self.formatVersion.major >= 2: if self.validate: if identifier in self.identifiers: raise GlifLibError( "identifier used more than once: %s" % identifier ) if self.validate and not identifierValidator(identifier): raise GlifLibError( "identifier not formatted properly: %s" % identifier ) attrs["identifier"] = identifier self.identifiers.add(identifier) etree.SubElement(self.outline, "component", attrs) if __name__ == "__main__": import doctest doctest.testmod() PKaZZZ��q� � fontTools/ufoLib/kerning.pydef lookupKerningValue( pair, kerning, groups, fallback=0, glyphToFirstGroup=None, glyphToSecondGroup=None ): """ Note: This expects kerning to be a flat dictionary of kerning pairs, not the nested structure used in kerning.plist. >>> groups = { ... "public.kern1.O" : ["O", "D", "Q"], ... "public.kern2.E" : ["E", "F"] ... } >>> kerning = { ... ("public.kern1.O", "public.kern2.E") : -100, ... ("public.kern1.O", "F") : -200, ... ("D", "F") : -300 ... } >>> lookupKerningValue(("D", "F"), kerning, groups) -300 >>> lookupKerningValue(("O", "F"), kerning, groups) -200 >>> lookupKerningValue(("O", "E"), kerning, groups) -100 >>> lookupKerningValue(("O", "O"), kerning, groups) 0 >>> lookupKerningValue(("E", "E"), kerning, groups) 0 >>> lookupKerningValue(("E", "O"), kerning, groups) 0 >>> lookupKerningValue(("X", "X"), kerning, groups) 0 >>> lookupKerningValue(("public.kern1.O", "public.kern2.E"), ... 
kerning, groups) -100 >>> lookupKerningValue(("public.kern1.O", "F"), kerning, groups) -200 >>> lookupKerningValue(("O", "public.kern2.E"), kerning, groups) -100 >>> lookupKerningValue(("public.kern1.X", "public.kern2.X"), kerning, groups) 0 """ # quickly check to see if the pair is in the kerning dictionary if pair in kerning: return kerning[pair] # create glyph to group mapping if glyphToFirstGroup is not None: assert glyphToSecondGroup is not None if glyphToSecondGroup is not None: assert glyphToFirstGroup is not None if glyphToFirstGroup is None: glyphToFirstGroup = {} glyphToSecondGroup = {} for group, groupMembers in groups.items(): if group.startswith("public.kern1."): for glyph in groupMembers: glyphToFirstGroup[glyph] = group elif group.startswith("public.kern2."): for glyph in groupMembers: glyphToSecondGroup[glyph] = group # get group names and make sure first and second are glyph names first, second = pair firstGroup = secondGroup = None if first.startswith("public.kern1."): firstGroup = first first = None else: firstGroup = glyphToFirstGroup.get(first) if second.startswith("public.kern2."): secondGroup = second second = None else: secondGroup = glyphToSecondGroup.get(second) # make an ordered list of pairs to look up pairs = [ (first, second), (first, secondGroup), (firstGroup, second), (firstGroup, secondGroup), ] # look up the pairs and return any matches for pair in pairs: if pair in kerning: return kerning[pair] # use the fallback value return fallback if __name__ == "__main__": import doctest doctest.testmod() PKaZZZ�`>���fontTools/ufoLib/plistlib.py"""DEPRECATED - This module is kept here only as a backward compatibility shim for the old ufoLib.plistlib module, which was moved to fontTools.misc.plistlib. Please use the latter instead. """ from fontTools.misc.plistlib import dump, dumps, load, loads from fontTools.misc.textTools import tobytes # The following functions were part of the old py2-like ufoLib.plistlib API. # They are kept only for backward compatibility. from fontTools.ufoLib.utils import deprecated @deprecated("Use 'fontTools.misc.plistlib.load' instead") def readPlist(path_or_file): did_open = False if isinstance(path_or_file, str): path_or_file = open(path_or_file, "rb") did_open = True try: return load(path_or_file, use_builtin_types=False) finally: if did_open: path_or_file.close() @deprecated("Use 'fontTools.misc.plistlib.dump' instead") def writePlist(value, path_or_file): did_open = False if isinstance(path_or_file, str): path_or_file = open(path_or_file, "wb") did_open = True try: dump(value, path_or_file, use_builtin_types=False) finally: if did_open: path_or_file.close() @deprecated("Use 'fontTools.misc.plistlib.loads' instead") def readPlistFromString(data): return loads(tobytes(data, encoding="utf-8"), use_builtin_types=False) @deprecated("Use 'fontTools.misc.plistlib.dumps' instead") def writePlistToString(value): return dumps(value, use_builtin_types=False) PKaZZZ.���fontTools/ufoLib/pointPen.py"""DEPRECATED - This module is kept here only as a backward compatibility shim for the old ufoLib.pointPen module, which was moved to fontTools.pens.pointPen. Please use the latter instead. """ from fontTools.pens.pointPen import * PKaZZZ"2eefontTools/ufoLib/utils.py"""This module contains miscellaneous helpers. It's not considered part of the public ufoLib API. """ import warnings import functools numberTypes = (int, float) def deprecated(msg=""): """Decorator factory to mark functions as deprecated with given message. >>> @deprecated("Enough!") ...
def some_function(): ... "I just print 'hello world'." ... print("hello world") >>> some_function() hello world >>> some_function.__doc__ == "I just print 'hello world'." True """ def deprecated_decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): warnings.warn( f"{func.__name__} function is deprecated. {msg}", category=DeprecationWarning, stacklevel=2, ) return func(*args, **kwargs) return wrapper return deprecated_decorator # To be mixed with enum.Enum in UFOFormatVersion and GLIFFormatVersion class _VersionTupleEnumMixin: @property def major(self): return self.value[0] @property def minor(self): return self.value[1] @classmethod def _missing_(cls, value): # allow initializing a version enum from a single (major) integer if isinstance(value, int): return cls((value, 0)) # or from None to obtain the current default version if value is None: return cls.default() return super()._missing_(value) def __str__(self): return f"{self.major}.{self.minor}" @classmethod def default(cls): # get the latest defined version (i.e. the max of all versions) return max(cls.__members__.values()) @classmethod def supported_versions(cls): return frozenset(cls.__members__.values()) if __name__ == "__main__": import doctest doctest.testmod() PKaZZZ����UxUxfontTools/ufoLib/validators.py"""Various low level data validators.""" import calendar from io import open import fs.base import fs.osfs from collections.abc import Mapping from fontTools.ufoLib.utils import numberTypes # ------- # Generic # ------- def isDictEnough(value): """ Some objects will likely come in that aren't dicts but are dict-ish enough. """ if isinstance(value, Mapping): return True for attr in ("keys", "values", "items"): if not hasattr(value, attr): return False return True def genericTypeValidator(value, typ): """ Generic. (Added at version 2.) """ return isinstance(value, typ) def genericIntListValidator(values, validValues): """ Generic. (Added at version 2.) """ if not isinstance(values, (list, tuple)): return False valuesSet = set(values) validValuesSet = set(validValues) if valuesSet - validValuesSet: return False for value in values: if not isinstance(value, int): return False return True def genericNonNegativeIntValidator(value): """ Generic. (Added at version 3.) """ if not isinstance(value, int): return False if value < 0: return False return True def genericNonNegativeNumberValidator(value): """ Generic. (Added at version 3.) """ if not isinstance(value, numberTypes): return False if value < 0: return False return True def genericDictValidator(value, prototype): """ Generic. (Added at version 3.) """ # not a dict if not isinstance(value, Mapping): return False # missing required keys for key, (typ, required) in prototype.items(): if not required: continue if key not in value: return False # unknown keys for key in value.keys(): if key not in prototype: return False # incorrect types for key, v in value.items(): prototypeType, required = prototype[key] if v is None and not required: continue if not isinstance(v, prototypeType): return False return True # -------------- # fontinfo.plist # -------------- # Data Validators def fontInfoStyleMapStyleNameValidator(value): """ Version 2+. """ options = ["regular", "italic", "bold", "bold italic"] return value in options def fontInfoOpenTypeGaspRangeRecordsValidator(value): """ Version 3+.
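Each record must be a dict with a non-negative integer rangeMaxPPEM and a rangeGaspBehavior list of values in 0-3, and records must be sorted by ascending PPEM. For example (illustrative values):

>>> fontInfoOpenTypeGaspRangeRecordsValidator(
...     [{"rangeMaxPPEM": 16, "rangeGaspBehavior": [0]},
...      {"rangeMaxPPEM": 65535, "rangeGaspBehavior": [1, 2]}])
True
>>> fontInfoOpenTypeGaspRangeRecordsValidator(
...     [{"rangeMaxPPEM": 65535, "rangeGaspBehavior": [0]},
...      {"rangeMaxPPEM": 16, "rangeGaspBehavior": [1]}])
False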
""" if not isinstance(value, list): return False if len(value) == 0: return True validBehaviors = [0, 1, 2, 3] dictPrototype = dict(rangeMaxPPEM=(int, True), rangeGaspBehavior=(list, True)) ppemOrder = [] for rangeRecord in value: if not genericDictValidator(rangeRecord, dictPrototype): return False ppem = rangeRecord["rangeMaxPPEM"] behavior = rangeRecord["rangeGaspBehavior"] ppemValidity = genericNonNegativeIntValidator(ppem) if not ppemValidity: return False behaviorValidity = genericIntListValidator(behavior, validBehaviors) if not behaviorValidity: return False ppemOrder.append(ppem) if ppemOrder != sorted(ppemOrder): return False return True def fontInfoOpenTypeHeadCreatedValidator(value): """ Version 2+. """ # format: 0000/00/00 00:00:00 if not isinstance(value, str): return False # basic formatting if not len(value) == 19: return False if value.count(" ") != 1: return False date, time = value.split(" ") if date.count("/") != 2: return False if time.count(":") != 2: return False # date year, month, day = date.split("/") if len(year) != 4: return False if len(month) != 2: return False if len(day) != 2: return False try: year = int(year) month = int(month) day = int(day) except ValueError: return False if month < 1 or month > 12: return False monthMaxDay = calendar.monthrange(year, month)[1] if day < 1 or day > monthMaxDay: return False # time hour, minute, second = time.split(":") if len(hour) != 2: return False if len(minute) != 2: return False if len(second) != 2: return False try: hour = int(hour) minute = int(minute) second = int(second) except ValueError: return False if hour < 0 or hour > 23: return False if minute < 0 or minute > 59: return False if second < 0 or second > 59: return False # fallback return True def fontInfoOpenTypeNameRecordsValidator(value): """ Version 3+. """ if not isinstance(value, list): return False dictPrototype = dict( nameID=(int, True), platformID=(int, True), encodingID=(int, True), languageID=(int, True), string=(str, True), ) for nameRecord in value: if not genericDictValidator(nameRecord, dictPrototype): return False return True def fontInfoOpenTypeOS2WeightClassValidator(value): """ Version 2+. """ if not isinstance(value, int): return False if value < 0: return False return True def fontInfoOpenTypeOS2WidthClassValidator(value): """ Version 2+. """ if not isinstance(value, int): return False if value < 1: return False if value > 9: return False return True def fontInfoVersion2OpenTypeOS2PanoseValidator(values): """ Version 2. """ if not isinstance(values, (list, tuple)): return False if len(values) != 10: return False for value in values: if not isinstance(value, int): return False # XXX further validation? return True def fontInfoVersion3OpenTypeOS2PanoseValidator(values): """ Version 3+. """ if not isinstance(values, (list, tuple)): return False if len(values) != 10: return False for value in values: if not isinstance(value, int): return False if value < 0: return False # XXX further validation? return True def fontInfoOpenTypeOS2FamilyClassValidator(values): """ Version 2+. """ if not isinstance(values, (list, tuple)): return False if len(values) != 2: return False for value in values: if not isinstance(value, int): return False classID, subclassID = values if classID < 0 or classID > 14: return False if subclassID < 0 or subclassID > 15: return False return True def fontInfoPostscriptBluesValidator(values): """ Version 2+. 
""" if not isinstance(values, (list, tuple)): return False if len(values) > 14: return False if len(values) % 2: return False for value in values: if not isinstance(value, numberTypes): return False return True def fontInfoPostscriptOtherBluesValidator(values): """ Version 2+. """ if not isinstance(values, (list, tuple)): return False if len(values) > 10: return False if len(values) % 2: return False for value in values: if not isinstance(value, numberTypes): return False return True def fontInfoPostscriptStemsValidator(values): """ Version 2+. """ if not isinstance(values, (list, tuple)): return False if len(values) > 12: return False for value in values: if not isinstance(value, numberTypes): return False return True def fontInfoPostscriptWindowsCharacterSetValidator(value): """ Version 2+. """ validValues = list(range(1, 21)) if value not in validValues: return False return True def fontInfoWOFFMetadataUniqueIDValidator(value): """ Version 3+. """ dictPrototype = dict(id=(str, True)) if not genericDictValidator(value, dictPrototype): return False return True def fontInfoWOFFMetadataVendorValidator(value): """ Version 3+. """ dictPrototype = { "name": (str, True), "url": (str, False), "dir": (str, False), "class": (str, False), } if not genericDictValidator(value, dictPrototype): return False if "dir" in value and value.get("dir") not in ("ltr", "rtl"): return False return True def fontInfoWOFFMetadataCreditsValidator(value): """ Version 3+. """ dictPrototype = dict(credits=(list, True)) if not genericDictValidator(value, dictPrototype): return False if not len(value["credits"]): return False dictPrototype = { "name": (str, True), "url": (str, False), "role": (str, False), "dir": (str, False), "class": (str, False), } for credit in value["credits"]: if not genericDictValidator(credit, dictPrototype): return False if "dir" in credit and credit.get("dir") not in ("ltr", "rtl"): return False return True def fontInfoWOFFMetadataDescriptionValidator(value): """ Version 3+. """ dictPrototype = dict(url=(str, False), text=(list, True)) if not genericDictValidator(value, dictPrototype): return False for text in value["text"]: if not fontInfoWOFFMetadataTextValue(text): return False return True def fontInfoWOFFMetadataLicenseValidator(value): """ Version 3+. """ dictPrototype = dict(url=(str, False), text=(list, False), id=(str, False)) if not genericDictValidator(value, dictPrototype): return False if "text" in value: for text in value["text"]: if not fontInfoWOFFMetadataTextValue(text): return False return True def fontInfoWOFFMetadataTrademarkValidator(value): """ Version 3+. """ dictPrototype = dict(text=(list, True)) if not genericDictValidator(value, dictPrototype): return False for text in value["text"]: if not fontInfoWOFFMetadataTextValue(text): return False return True def fontInfoWOFFMetadataCopyrightValidator(value): """ Version 3+. """ dictPrototype = dict(text=(list, True)) if not genericDictValidator(value, dictPrototype): return False for text in value["text"]: if not fontInfoWOFFMetadataTextValue(text): return False return True def fontInfoWOFFMetadataLicenseeValidator(value): """ Version 3+. """ dictPrototype = {"name": (str, True), "dir": (str, False), "class": (str, False)} if not genericDictValidator(value, dictPrototype): return False if "dir" in value and value.get("dir") not in ("ltr", "rtl"): return False return True def fontInfoWOFFMetadataTextValue(value): """ Version 3+. 
""" dictPrototype = { "text": (str, True), "language": (str, False), "dir": (str, False), "class": (str, False), } if not genericDictValidator(value, dictPrototype): return False if "dir" in value and value.get("dir") not in ("ltr", "rtl"): return False return True def fontInfoWOFFMetadataExtensionsValidator(value): """ Version 3+. """ if not isinstance(value, list): return False if not value: return False for extension in value: if not fontInfoWOFFMetadataExtensionValidator(extension): return False return True def fontInfoWOFFMetadataExtensionValidator(value): """ Version 3+. """ dictPrototype = dict(names=(list, False), items=(list, True), id=(str, False)) if not genericDictValidator(value, dictPrototype): return False if "names" in value: for name in value["names"]: if not fontInfoWOFFMetadataExtensionNameValidator(name): return False for item in value["items"]: if not fontInfoWOFFMetadataExtensionItemValidator(item): return False return True def fontInfoWOFFMetadataExtensionItemValidator(value): """ Version 3+. """ dictPrototype = dict(id=(str, False), names=(list, True), values=(list, True)) if not genericDictValidator(value, dictPrototype): return False for name in value["names"]: if not fontInfoWOFFMetadataExtensionNameValidator(name): return False for val in value["values"]: if not fontInfoWOFFMetadataExtensionValueValidator(val): return False return True def fontInfoWOFFMetadataExtensionNameValidator(value): """ Version 3+. """ dictPrototype = { "text": (str, True), "language": (str, False), "dir": (str, False), "class": (str, False), } if not genericDictValidator(value, dictPrototype): return False if "dir" in value and value.get("dir") not in ("ltr", "rtl"): return False return True def fontInfoWOFFMetadataExtensionValueValidator(value): """ Version 3+. """ dictPrototype = { "text": (str, True), "language": (str, False), "dir": (str, False), "class": (str, False), } if not genericDictValidator(value, dictPrototype): return False if "dir" in value and value.get("dir") not in ("ltr", "rtl"): return False return True # ---------- # Guidelines # ---------- def guidelinesValidator(value, identifiers=None): """ Version 3+. """ if not isinstance(value, list): return False if identifiers is None: identifiers = set() for guide in value: if not guidelineValidator(guide): return False identifier = guide.get("identifier") if identifier is not None: if identifier in identifiers: return False identifiers.add(identifier) return True _guidelineDictPrototype = dict( x=((int, float), False), y=((int, float), False), angle=((int, float), False), name=(str, False), color=(str, False), identifier=(str, False), ) def guidelineValidator(value): """ Version 3+. 
""" if not genericDictValidator(value, _guidelineDictPrototype): return False x = value.get("x") y = value.get("y") angle = value.get("angle") # x or y must be present if x is None and y is None: return False # if x or y are None, angle must not be present if x is None or y is None: if angle is not None: return False # if x and y are defined, angle must be defined if x is not None and y is not None and angle is None: return False # angle must be between 0 and 360 if angle is not None: if angle < 0: return False if angle > 360: return False # identifier must be 1 or more characters identifier = value.get("identifier") if identifier is not None and not identifierValidator(identifier): return False # color must follow the proper format color = value.get("color") if color is not None and not colorValidator(color): return False return True # ------- # Anchors # ------- def anchorsValidator(value, identifiers=None): """ Version 3+. """ if not isinstance(value, list): return False if identifiers is None: identifiers = set() for anchor in value: if not anchorValidator(anchor): return False identifier = anchor.get("identifier") if identifier is not None: if identifier in identifiers: return False identifiers.add(identifier) return True _anchorDictPrototype = dict( x=((int, float), False), y=((int, float), False), name=(str, False), color=(str, False), identifier=(str, False), ) def anchorValidator(value): """ Version 3+. """ if not genericDictValidator(value, _anchorDictPrototype): return False x = value.get("x") y = value.get("y") # x and y must be present if x is None or y is None: return False # identifier must be 1 or more characters identifier = value.get("identifier") if identifier is not None and not identifierValidator(identifier): return False # color must follow the proper format color = value.get("color") if color is not None and not colorValidator(color): return False return True # ---------- # Identifier # ---------- def identifierValidator(value): """ Version 3+. >>> identifierValidator("a") True >>> identifierValidator("") False >>> identifierValidator("a" * 101) False """ validCharactersMin = 0x20 validCharactersMax = 0x7E if not isinstance(value, str): return False if not value: return False if len(value) > 100: return False for c in value: c = ord(c) if c < validCharactersMin or c > validCharactersMax: return False return True # ----- # Color # ----- def colorValidator(value): """ Version 3+. 
>>> colorValidator("0,0,0,0") True >>> colorValidator(".5,.5,.5,.5") True >>> colorValidator("0.5,0.5,0.5,0.5") True >>> colorValidator("1,1,1,1") True >>> colorValidator("2,0,0,0") False >>> colorValidator("0,2,0,0") False >>> colorValidator("0,0,2,0") False >>> colorValidator("0,0,0,2") False >>> colorValidator("1r,1,1,1") False >>> colorValidator("1,1g,1,1") False >>> colorValidator("1,1,1b,1") False >>> colorValidator("1,1,1,1a") False >>> colorValidator("1 1 1 1") False >>> colorValidator("1 1,1,1") False >>> colorValidator("1,1 1,1") False >>> colorValidator("1,1,1 1") False >>> colorValidator("1, 1, 1, 1") True """ if not isinstance(value, str): return False parts = value.split(",") if len(parts) != 4: return False for part in parts: part = part.strip() converted = False try: part = int(part) converted = True except ValueError: pass if not converted: try: part = float(part) converted = True except ValueError: pass if not converted: return False if part < 0: return False if part > 1: return False return True # ----- # image # ----- pngSignature = b"\x89PNG\r\n\x1a\n" _imageDictPrototype = dict( fileName=(str, True), xScale=((int, float), False), xyScale=((int, float), False), yxScale=((int, float), False), yScale=((int, float), False), xOffset=((int, float), False), yOffset=((int, float), False), color=(str, False), ) def imageValidator(value): """ Version 3+. """ if not genericDictValidator(value, _imageDictPrototype): return False # fileName must be one or more characters if not value["fileName"]: return False # color must follow the proper format color = value.get("color") if color is not None and not colorValidator(color): return False return True def pngValidator(path=None, data=None, fileObj=None): """ Version 3+. This checks the signature of the image data. """ assert path is not None or data is not None or fileObj is not None if path is not None: with open(path, "rb") as f: signature = f.read(8) elif data is not None: signature = data[:8] elif fileObj is not None: pos = fileObj.tell() signature = fileObj.read(8) fileObj.seek(pos) if signature != pngSignature: return False, "Image does not begin with the PNG signature." return True, None # ------------------- # layercontents.plist # ------------------- def layerContentsValidator(value, ufoPathOrFileSystem): """ Check the validity of layercontents.plist. Version 3+. """ if isinstance(ufoPathOrFileSystem, fs.base.FS): fileSystem = ufoPathOrFileSystem else: fileSystem = fs.osfs.OSFS(ufoPathOrFileSystem) bogusFileMessage = "layercontents.plist is not in the correct format." # file isn't in the right format if not isinstance(value, list): return False, bogusFileMessage # work through each entry usedLayerNames = set() usedDirectories = set() contents = {} for entry in value: # layer entry in the incorrect format if not isinstance(entry, list): return False, bogusFileMessage if not len(entry) == 2: return False, bogusFileMessage for i in entry: if not isinstance(i, str): return False, bogusFileMessage layerName, directoryName = entry # check directory naming if directoryName != "glyphs": if not directoryName.startswith("glyphs."): return ( False, "Invalid directory name (%s) in layercontents.plist." % directoryName, ) if len(layerName) == 0: return False, "Empty layer name in layercontents.plist." # directory doesn't exist if not fileSystem.exists(directoryName): return False, "A glyphset does not exist at %s."
% directoryName # default layer name if layerName == "public.default" and directoryName != "glyphs": return ( False, "The name public.default is being used by a layer that is not the default.", ) # check usage if layerName in usedLayerNames: return ( False, "The layer name %s is used by more than one layer." % layerName, ) usedLayerNames.add(layerName) if directoryName in usedDirectories: return ( False, "The directory %s is used by more than one layer." % directoryName, ) usedDirectories.add(directoryName) # store contents[layerName] = directoryName # missing default layer foundDefault = "glyphs" in contents.values() if not foundDefault: return False, "The required default glyph set is not in the UFO." return True, None # ------------ # groups.plist # ------------ def groupsValidator(value): """ Check the validity of the groups. Version 3+ (though it's backwards compatible with UFO 1 and UFO 2). >>> groups = {"A" : ["A", "A"], "A2" : ["A"]} >>> groupsValidator(groups) (True, None) >>> groups = {"" : ["A"]} >>> valid, msg = groupsValidator(groups) >>> valid False >>> print(msg) A group has an empty name. >>> groups = {"public.awesome" : ["A"]} >>> groupsValidator(groups) (True, None) >>> groups = {"public.kern1." : ["A"]} >>> valid, msg = groupsValidator(groups) >>> valid False >>> print(msg) The group data contains a kerning group with an incomplete name. >>> groups = {"public.kern2." : ["A"]} >>> valid, msg = groupsValidator(groups) >>> valid False >>> print(msg) The group data contains a kerning group with an incomplete name. >>> groups = {"public.kern1.A" : ["A"], "public.kern2.A" : ["A"]} >>> groupsValidator(groups) (True, None) >>> groups = {"public.kern1.A1" : ["A"], "public.kern1.A2" : ["A"]} >>> valid, msg = groupsValidator(groups) >>> valid False >>> print(msg) The glyph "A" occurs in too many kerning groups. """ bogusFormatMessage = "The group data is not in the correct format." if not isDictEnough(value): return False, bogusFormatMessage firstSideMapping = {} secondSideMapping = {} for groupName, glyphList in value.items(): if not isinstance(groupName, (str)): return False, bogusFormatMessage if not isinstance(glyphList, (list, tuple)): return False, bogusFormatMessage if not groupName: return False, "A group has an empty name." if groupName.startswith("public."): if not groupName.startswith("public.kern1.") and not groupName.startswith( "public.kern2." ): # unknown public.* name. silently skip. continue else: if len("public.kernN.") == len(groupName): return ( False, "The group data contains a kerning group with an incomplete name.", ) if groupName.startswith("public.kern1."): d = firstSideMapping else: d = secondSideMapping for glyphName in glyphList: if not isinstance(glyphName, str): return ( False, "The group data %s contains an invalid member." % groupName, ) if glyphName in d: return ( False, 'The glyph "%s" occurs in too many kerning groups.' % glyphName, ) d[glyphName] = groupName return True, None # ------------- # kerning.plist # ------------- def kerningValidator(data): """ Check the validity of the kerning data structure. Version 3+ (though it's backwards compatible with UFO 1 and UFO 2). >>> kerning = {"A" : {"B" : 100}} >>> kerningValidator(kerning) (True, None) >>> kerning = {"A" : ["B"]} >>> valid, msg = kerningValidator(kerning) >>> valid False >>> print(msg) The kerning data is not in the correct format. >>> kerning = {"A" : {"B" : "100"}} >>> valid, msg = kerningValidator(kerning) >>> valid False >>> print(msg) The kerning data is not in the correct format. 
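Group names are accepted on either side of a pair, as long as the values are numbers:

>>> kerning = {"public.kern1.O" : {"public.kern2.E" : -100}}
>>> kerningValidator(kerning)
(True, None)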
""" bogusFormatMessage = "The kerning data is not in the correct format." if not isinstance(data, Mapping): return False, bogusFormatMessage for first, secondDict in data.items(): if not isinstance(first, str): return False, bogusFormatMessage elif not isinstance(secondDict, Mapping): return False, bogusFormatMessage for second, value in secondDict.items(): if not isinstance(second, str): return False, bogusFormatMessage elif not isinstance(value, numberTypes): return False, bogusFormatMessage return True, None # ------------- # lib.plist/lib # ------------- _bogusLibFormatMessage = "The lib data is not in the correct format: %s" def fontLibValidator(value): """ Check the validity of the lib. Version 3+ (though it's backwards compatible with UFO 1 and UFO 2). >>> lib = {"foo" : "bar"} >>> fontLibValidator(lib) (True, None) >>> lib = {"public.awesome" : "hello"} >>> fontLibValidator(lib) (True, None) >>> lib = {"public.glyphOrder" : ["A", "C", "B"]} >>> fontLibValidator(lib) (True, None) >>> lib = "hello" >>> valid, msg = fontLibValidator(lib) >>> valid False >>> print(msg) # doctest: +ELLIPSIS The lib data is not in the correct format: expected a dictionary, ... >>> lib = {1: "hello"} >>> valid, msg = fontLibValidator(lib) >>> valid False >>> print(msg) The lib key is not properly formatted: expected str, found int: 1 >>> lib = {"public.glyphOrder" : "hello"} >>> valid, msg = fontLibValidator(lib) >>> valid False >>> print(msg) # doctest: +ELLIPSIS public.glyphOrder is not properly formatted: expected list or tuple,... >>> lib = {"public.glyphOrder" : ["A", 1, "B"]} >>> valid, msg = fontLibValidator(lib) >>> valid False >>> print(msg) # doctest: +ELLIPSIS public.glyphOrder is not properly formatted: expected str,... """ if not isDictEnough(value): reason = "expected a dictionary, found %s" % type(value).__name__ return False, _bogusLibFormatMessage % reason for key, value in value.items(): if not isinstance(key, str): return False, ( "The lib key is not properly formatted: expected str, found %s: %r" % (type(key).__name__, key) ) # public.glyphOrder if key == "public.glyphOrder": bogusGlyphOrderMessage = "public.glyphOrder is not properly formatted: %s" if not isinstance(value, (list, tuple)): reason = "expected list or tuple, found %s" % type(value).__name__ return False, bogusGlyphOrderMessage % reason for glyphName in value: if not isinstance(glyphName, str): reason = "expected str, found %s" % type(glyphName).__name__ return False, bogusGlyphOrderMessage % reason return True, None # -------- # GLIF lib # -------- def glyphLibValidator(value): """ Check the validity of the lib. Version 3+ (though it's backwards compatible with UFO 1 and UFO 2). >>> lib = {"foo" : "bar"} >>> glyphLibValidator(lib) (True, None) >>> lib = {"public.awesome" : "hello"} >>> glyphLibValidator(lib) (True, None) >>> lib = {"public.markColor" : "1,0,0,0.5"} >>> glyphLibValidator(lib) (True, None) >>> lib = {"public.markColor" : 1} >>> valid, msg = glyphLibValidator(lib) >>> valid False >>> print(msg) public.markColor is not properly formatted. """ if not isDictEnough(value): reason = "expected a dictionary, found %s" % type(value).__name__ return False, _bogusLibFormatMessage % reason for key, value in value.items(): if not isinstance(key, str): reason = "key (%s) should be a string" % key return False, _bogusLibFormatMessage % reason # public.markColor if key == "public.markColor": if not colorValidator(value): return False, "public.markColor is not properly formatted." 
return True, None if __name__ == "__main__": import doctest doctest.testmod() PKaZZZ&����z�zfontTools/unicodedata/Blocks.py# -*- coding: utf-8 -*- # # NOTE: This file was auto-generated with MetaTools/buildUCD.py. # Source: https://unicode.org/Public/UNIDATA/Blocks.txt # License: http://unicode.org/copyright.html#License # # Blocks-15.0.0.txt # Date: 2022-01-28, 20:58:00 GMT [KW] # © 2022 Unicode®, Inc. # For terms of use, see https://www.unicode.org/terms_of_use.html # # Unicode Character Database # For documentation, see https://www.unicode.org/reports/tr44/ # # Format: # Start Code..End Code; Block Name RANGES = [ 0x0000, # .. 0x007F ; Basic Latin 0x0080, # .. 0x00FF ; Latin-1 Supplement 0x0100, # .. 0x017F ; Latin Extended-A 0x0180, # .. 0x024F ; Latin Extended-B 0x0250, # .. 0x02AF ; IPA Extensions 0x02B0, # .. 0x02FF ; Spacing Modifier Letters 0x0300, # .. 0x036F ; Combining Diacritical Marks 0x0370, # .. 0x03FF ; Greek and Coptic 0x0400, # .. 0x04FF ; Cyrillic 0x0500, # .. 0x052F ; Cyrillic Supplement 0x0530, # .. 0x058F ; Armenian 0x0590, # .. 0x05FF ; Hebrew 0x0600, # .. 0x06FF ; Arabic 0x0700, # .. 0x074F ; Syriac 0x0750, # .. 0x077F ; Arabic Supplement 0x0780, # .. 0x07BF ; Thaana 0x07C0, # .. 0x07FF ; NKo 0x0800, # .. 0x083F ; Samaritan 0x0840, # .. 0x085F ; Mandaic 0x0860, # .. 0x086F ; Syriac Supplement 0x0870, # .. 0x089F ; Arabic Extended-B 0x08A0, # .. 0x08FF ; Arabic Extended-A 0x0900, # .. 0x097F ; Devanagari 0x0980, # .. 0x09FF ; Bengali 0x0A00, # .. 0x0A7F ; Gurmukhi 0x0A80, # .. 0x0AFF ; Gujarati 0x0B00, # .. 0x0B7F ; Oriya 0x0B80, # .. 0x0BFF ; Tamil 0x0C00, # .. 0x0C7F ; Telugu 0x0C80, # .. 0x0CFF ; Kannada 0x0D00, # .. 0x0D7F ; Malayalam 0x0D80, # .. 0x0DFF ; Sinhala 0x0E00, # .. 0x0E7F ; Thai 0x0E80, # .. 0x0EFF ; Lao 0x0F00, # .. 0x0FFF ; Tibetan 0x1000, # .. 0x109F ; Myanmar 0x10A0, # .. 0x10FF ; Georgian 0x1100, # .. 0x11FF ; Hangul Jamo 0x1200, # .. 0x137F ; Ethiopic 0x1380, # .. 0x139F ; Ethiopic Supplement 0x13A0, # .. 0x13FF ; Cherokee 0x1400, # .. 0x167F ; Unified Canadian Aboriginal Syllabics 0x1680, # .. 0x169F ; Ogham 0x16A0, # .. 0x16FF ; Runic 0x1700, # .. 0x171F ; Tagalog 0x1720, # .. 0x173F ; Hanunoo 0x1740, # .. 0x175F ; Buhid 0x1760, # .. 0x177F ; Tagbanwa 0x1780, # .. 0x17FF ; Khmer 0x1800, # .. 0x18AF ; Mongolian 0x18B0, # .. 0x18FF ; Unified Canadian Aboriginal Syllabics Extended 0x1900, # .. 0x194F ; Limbu 0x1950, # .. 0x197F ; Tai Le 0x1980, # .. 0x19DF ; New Tai Lue 0x19E0, # .. 0x19FF ; Khmer Symbols 0x1A00, # .. 0x1A1F ; Buginese 0x1A20, # .. 0x1AAF ; Tai Tham 0x1AB0, # .. 0x1AFF ; Combining Diacritical Marks Extended 0x1B00, # .. 0x1B7F ; Balinese 0x1B80, # .. 0x1BBF ; Sundanese 0x1BC0, # .. 0x1BFF ; Batak 0x1C00, # .. 0x1C4F ; Lepcha 0x1C50, # .. 0x1C7F ; Ol Chiki 0x1C80, # .. 0x1C8F ; Cyrillic Extended-C 0x1C90, # .. 0x1CBF ; Georgian Extended 0x1CC0, # .. 0x1CCF ; Sundanese Supplement 0x1CD0, # .. 0x1CFF ; Vedic Extensions 0x1D00, # .. 0x1D7F ; Phonetic Extensions 0x1D80, # .. 0x1DBF ; Phonetic Extensions Supplement 0x1DC0, # .. 0x1DFF ; Combining Diacritical Marks Supplement 0x1E00, # .. 0x1EFF ; Latin Extended Additional 0x1F00, # .. 0x1FFF ; Greek Extended 0x2000, # .. 0x206F ; General Punctuation 0x2070, # .. 0x209F ; Superscripts and Subscripts 0x20A0, # .. 0x20CF ; Currency Symbols 0x20D0, # .. 0x20FF ; Combining Diacritical Marks for Symbols 0x2100, # .. 0x214F ; Letterlike Symbols 0x2150, # .. 0x218F ; Number Forms 0x2190, # .. 0x21FF ; Arrows 0x2200, # .. 0x22FF ; Mathematical Operators 0x2300, # .. 
0x23FF ; Miscellaneous Technical 0x2400, # .. 0x243F ; Control Pictures 0x2440, # .. 0x245F ; Optical Character Recognition 0x2460, # .. 0x24FF ; Enclosed Alphanumerics 0x2500, # .. 0x257F ; Box Drawing 0x2580, # .. 0x259F ; Block Elements 0x25A0, # .. 0x25FF ; Geometric Shapes 0x2600, # .. 0x26FF ; Miscellaneous Symbols 0x2700, # .. 0x27BF ; Dingbats 0x27C0, # .. 0x27EF ; Miscellaneous Mathematical Symbols-A 0x27F0, # .. 0x27FF ; Supplemental Arrows-A 0x2800, # .. 0x28FF ; Braille Patterns 0x2900, # .. 0x297F ; Supplemental Arrows-B 0x2980, # .. 0x29FF ; Miscellaneous Mathematical Symbols-B 0x2A00, # .. 0x2AFF ; Supplemental Mathematical Operators 0x2B00, # .. 0x2BFF ; Miscellaneous Symbols and Arrows 0x2C00, # .. 0x2C5F ; Glagolitic 0x2C60, # .. 0x2C7F ; Latin Extended-C 0x2C80, # .. 0x2CFF ; Coptic 0x2D00, # .. 0x2D2F ; Georgian Supplement 0x2D30, # .. 0x2D7F ; Tifinagh 0x2D80, # .. 0x2DDF ; Ethiopic Extended 0x2DE0, # .. 0x2DFF ; Cyrillic Extended-A 0x2E00, # .. 0x2E7F ; Supplemental Punctuation 0x2E80, # .. 0x2EFF ; CJK Radicals Supplement 0x2F00, # .. 0x2FDF ; Kangxi Radicals 0x2FE0, # .. 0x2FEF ; No_Block 0x2FF0, # .. 0x2FFF ; Ideographic Description Characters 0x3000, # .. 0x303F ; CJK Symbols and Punctuation 0x3040, # .. 0x309F ; Hiragana 0x30A0, # .. 0x30FF ; Katakana 0x3100, # .. 0x312F ; Bopomofo 0x3130, # .. 0x318F ; Hangul Compatibility Jamo 0x3190, # .. 0x319F ; Kanbun 0x31A0, # .. 0x31BF ; Bopomofo Extended 0x31C0, # .. 0x31EF ; CJK Strokes 0x31F0, # .. 0x31FF ; Katakana Phonetic Extensions 0x3200, # .. 0x32FF ; Enclosed CJK Letters and Months 0x3300, # .. 0x33FF ; CJK Compatibility 0x3400, # .. 0x4DBF ; CJK Unified Ideographs Extension A 0x4DC0, # .. 0x4DFF ; Yijing Hexagram Symbols 0x4E00, # .. 0x9FFF ; CJK Unified Ideographs 0xA000, # .. 0xA48F ; Yi Syllables 0xA490, # .. 0xA4CF ; Yi Radicals 0xA4D0, # .. 0xA4FF ; Lisu 0xA500, # .. 0xA63F ; Vai 0xA640, # .. 0xA69F ; Cyrillic Extended-B 0xA6A0, # .. 0xA6FF ; Bamum 0xA700, # .. 0xA71F ; Modifier Tone Letters 0xA720, # .. 0xA7FF ; Latin Extended-D 0xA800, # .. 0xA82F ; Syloti Nagri 0xA830, # .. 0xA83F ; Common Indic Number Forms 0xA840, # .. 0xA87F ; Phags-pa 0xA880, # .. 0xA8DF ; Saurashtra 0xA8E0, # .. 0xA8FF ; Devanagari Extended 0xA900, # .. 0xA92F ; Kayah Li 0xA930, # .. 0xA95F ; Rejang 0xA960, # .. 0xA97F ; Hangul Jamo Extended-A 0xA980, # .. 0xA9DF ; Javanese 0xA9E0, # .. 0xA9FF ; Myanmar Extended-B 0xAA00, # .. 0xAA5F ; Cham 0xAA60, # .. 0xAA7F ; Myanmar Extended-A 0xAA80, # .. 0xAADF ; Tai Viet 0xAAE0, # .. 0xAAFF ; Meetei Mayek Extensions 0xAB00, # .. 0xAB2F ; Ethiopic Extended-A 0xAB30, # .. 0xAB6F ; Latin Extended-E 0xAB70, # .. 0xABBF ; Cherokee Supplement 0xABC0, # .. 0xABFF ; Meetei Mayek 0xAC00, # .. 0xD7AF ; Hangul Syllables 0xD7B0, # .. 0xD7FF ; Hangul Jamo Extended-B 0xD800, # .. 0xDB7F ; High Surrogates 0xDB80, # .. 0xDBFF ; High Private Use Surrogates 0xDC00, # .. 0xDFFF ; Low Surrogates 0xE000, # .. 0xF8FF ; Private Use Area 0xF900, # .. 0xFAFF ; CJK Compatibility Ideographs 0xFB00, # .. 0xFB4F ; Alphabetic Presentation Forms 0xFB50, # .. 0xFDFF ; Arabic Presentation Forms-A 0xFE00, # .. 0xFE0F ; Variation Selectors 0xFE10, # .. 0xFE1F ; Vertical Forms 0xFE20, # .. 0xFE2F ; Combining Half Marks 0xFE30, # .. 0xFE4F ; CJK Compatibility Forms 0xFE50, # .. 0xFE6F ; Small Form Variants 0xFE70, # .. 0xFEFF ; Arabic Presentation Forms-B 0xFF00, # .. 0xFFEF ; Halfwidth and Fullwidth Forms 0xFFF0, # .. 0xFFFF ; Specials 0x10000, # .. 0x1007F ; Linear B Syllabary 0x10080, # .. 
0x100FF ; Linear B Ideograms 0x10100, # .. 0x1013F ; Aegean Numbers 0x10140, # .. 0x1018F ; Ancient Greek Numbers 0x10190, # .. 0x101CF ; Ancient Symbols 0x101D0, # .. 0x101FF ; Phaistos Disc 0x10200, # .. 0x1027F ; No_Block 0x10280, # .. 0x1029F ; Lycian 0x102A0, # .. 0x102DF ; Carian 0x102E0, # .. 0x102FF ; Coptic Epact Numbers 0x10300, # .. 0x1032F ; Old Italic 0x10330, # .. 0x1034F ; Gothic 0x10350, # .. 0x1037F ; Old Permic 0x10380, # .. 0x1039F ; Ugaritic 0x103A0, # .. 0x103DF ; Old Persian 0x103E0, # .. 0x103FF ; No_Block 0x10400, # .. 0x1044F ; Deseret 0x10450, # .. 0x1047F ; Shavian 0x10480, # .. 0x104AF ; Osmanya 0x104B0, # .. 0x104FF ; Osage 0x10500, # .. 0x1052F ; Elbasan 0x10530, # .. 0x1056F ; Caucasian Albanian 0x10570, # .. 0x105BF ; Vithkuqi 0x105C0, # .. 0x105FF ; No_Block 0x10600, # .. 0x1077F ; Linear A 0x10780, # .. 0x107BF ; Latin Extended-F 0x107C0, # .. 0x107FF ; No_Block 0x10800, # .. 0x1083F ; Cypriot Syllabary 0x10840, # .. 0x1085F ; Imperial Aramaic 0x10860, # .. 0x1087F ; Palmyrene 0x10880, # .. 0x108AF ; Nabataean 0x108B0, # .. 0x108DF ; No_Block 0x108E0, # .. 0x108FF ; Hatran 0x10900, # .. 0x1091F ; Phoenician 0x10920, # .. 0x1093F ; Lydian 0x10940, # .. 0x1097F ; No_Block 0x10980, # .. 0x1099F ; Meroitic Hieroglyphs 0x109A0, # .. 0x109FF ; Meroitic Cursive 0x10A00, # .. 0x10A5F ; Kharoshthi 0x10A60, # .. 0x10A7F ; Old South Arabian 0x10A80, # .. 0x10A9F ; Old North Arabian 0x10AA0, # .. 0x10ABF ; No_Block 0x10AC0, # .. 0x10AFF ; Manichaean 0x10B00, # .. 0x10B3F ; Avestan 0x10B40, # .. 0x10B5F ; Inscriptional Parthian 0x10B60, # .. 0x10B7F ; Inscriptional Pahlavi 0x10B80, # .. 0x10BAF ; Psalter Pahlavi 0x10BB0, # .. 0x10BFF ; No_Block 0x10C00, # .. 0x10C4F ; Old Turkic 0x10C50, # .. 0x10C7F ; No_Block 0x10C80, # .. 0x10CFF ; Old Hungarian 0x10D00, # .. 0x10D3F ; Hanifi Rohingya 0x10D40, # .. 0x10E5F ; No_Block 0x10E60, # .. 0x10E7F ; Rumi Numeral Symbols 0x10E80, # .. 0x10EBF ; Yezidi 0x10EC0, # .. 0x10EFF ; Arabic Extended-C 0x10F00, # .. 0x10F2F ; Old Sogdian 0x10F30, # .. 0x10F6F ; Sogdian 0x10F70, # .. 0x10FAF ; Old Uyghur 0x10FB0, # .. 0x10FDF ; Chorasmian 0x10FE0, # .. 0x10FFF ; Elymaic 0x11000, # .. 0x1107F ; Brahmi 0x11080, # .. 0x110CF ; Kaithi 0x110D0, # .. 0x110FF ; Sora Sompeng 0x11100, # .. 0x1114F ; Chakma 0x11150, # .. 0x1117F ; Mahajani 0x11180, # .. 0x111DF ; Sharada 0x111E0, # .. 0x111FF ; Sinhala Archaic Numbers 0x11200, # .. 0x1124F ; Khojki 0x11250, # .. 0x1127F ; No_Block 0x11280, # .. 0x112AF ; Multani 0x112B0, # .. 0x112FF ; Khudawadi 0x11300, # .. 0x1137F ; Grantha 0x11380, # .. 0x113FF ; No_Block 0x11400, # .. 0x1147F ; Newa 0x11480, # .. 0x114DF ; Tirhuta 0x114E0, # .. 0x1157F ; No_Block 0x11580, # .. 0x115FF ; Siddham 0x11600, # .. 0x1165F ; Modi 0x11660, # .. 0x1167F ; Mongolian Supplement 0x11680, # .. 0x116CF ; Takri 0x116D0, # .. 0x116FF ; No_Block 0x11700, # .. 0x1174F ; Ahom 0x11750, # .. 0x117FF ; No_Block 0x11800, # .. 0x1184F ; Dogra 0x11850, # .. 0x1189F ; No_Block 0x118A0, # .. 0x118FF ; Warang Citi 0x11900, # .. 0x1195F ; Dives Akuru 0x11960, # .. 0x1199F ; No_Block 0x119A0, # .. 0x119FF ; Nandinagari 0x11A00, # .. 0x11A4F ; Zanabazar Square 0x11A50, # .. 0x11AAF ; Soyombo 0x11AB0, # .. 0x11ABF ; Unified Canadian Aboriginal Syllabics Extended-A 0x11AC0, # .. 0x11AFF ; Pau Cin Hau 0x11B00, # .. 0x11B5F ; Devanagari Extended-A 0x11B60, # .. 0x11BFF ; No_Block 0x11C00, # .. 0x11C6F ; Bhaiksuki 0x11C70, # .. 0x11CBF ; Marchen 0x11CC0, # .. 0x11CFF ; No_Block 0x11D00, # .. 0x11D5F ; Masaram Gondi 0x11D60, # .. 
0x11DAF ; Gunjala Gondi 0x11DB0, # .. 0x11EDF ; No_Block 0x11EE0, # .. 0x11EFF ; Makasar 0x11F00, # .. 0x11F5F ; Kawi 0x11F60, # .. 0x11FAF ; No_Block 0x11FB0, # .. 0x11FBF ; Lisu Supplement 0x11FC0, # .. 0x11FFF ; Tamil Supplement 0x12000, # .. 0x123FF ; Cuneiform 0x12400, # .. 0x1247F ; Cuneiform Numbers and Punctuation 0x12480, # .. 0x1254F ; Early Dynastic Cuneiform 0x12550, # .. 0x12F8F ; No_Block 0x12F90, # .. 0x12FFF ; Cypro-Minoan 0x13000, # .. 0x1342F ; Egyptian Hieroglyphs 0x13430, # .. 0x1345F ; Egyptian Hieroglyph Format Controls 0x13460, # .. 0x143FF ; No_Block 0x14400, # .. 0x1467F ; Anatolian Hieroglyphs 0x14680, # .. 0x167FF ; No_Block 0x16800, # .. 0x16A3F ; Bamum Supplement 0x16A40, # .. 0x16A6F ; Mro 0x16A70, # .. 0x16ACF ; Tangsa 0x16AD0, # .. 0x16AFF ; Bassa Vah 0x16B00, # .. 0x16B8F ; Pahawh Hmong 0x16B90, # .. 0x16E3F ; No_Block 0x16E40, # .. 0x16E9F ; Medefaidrin 0x16EA0, # .. 0x16EFF ; No_Block 0x16F00, # .. 0x16F9F ; Miao 0x16FA0, # .. 0x16FDF ; No_Block 0x16FE0, # .. 0x16FFF ; Ideographic Symbols and Punctuation 0x17000, # .. 0x187FF ; Tangut 0x18800, # .. 0x18AFF ; Tangut Components 0x18B00, # .. 0x18CFF ; Khitan Small Script 0x18D00, # .. 0x18D7F ; Tangut Supplement 0x18D80, # .. 0x1AFEF ; No_Block 0x1AFF0, # .. 0x1AFFF ; Kana Extended-B 0x1B000, # .. 0x1B0FF ; Kana Supplement 0x1B100, # .. 0x1B12F ; Kana Extended-A 0x1B130, # .. 0x1B16F ; Small Kana Extension 0x1B170, # .. 0x1B2FF ; Nushu 0x1B300, # .. 0x1BBFF ; No_Block 0x1BC00, # .. 0x1BC9F ; Duployan 0x1BCA0, # .. 0x1BCAF ; Shorthand Format Controls 0x1BCB0, # .. 0x1CEFF ; No_Block 0x1CF00, # .. 0x1CFCF ; Znamenny Musical Notation 0x1CFD0, # .. 0x1CFFF ; No_Block 0x1D000, # .. 0x1D0FF ; Byzantine Musical Symbols 0x1D100, # .. 0x1D1FF ; Musical Symbols 0x1D200, # .. 0x1D24F ; Ancient Greek Musical Notation 0x1D250, # .. 0x1D2BF ; No_Block 0x1D2C0, # .. 0x1D2DF ; Kaktovik Numerals 0x1D2E0, # .. 0x1D2FF ; Mayan Numerals 0x1D300, # .. 0x1D35F ; Tai Xuan Jing Symbols 0x1D360, # .. 0x1D37F ; Counting Rod Numerals 0x1D380, # .. 0x1D3FF ; No_Block 0x1D400, # .. 0x1D7FF ; Mathematical Alphanumeric Symbols 0x1D800, # .. 0x1DAAF ; Sutton SignWriting 0x1DAB0, # .. 0x1DEFF ; No_Block 0x1DF00, # .. 0x1DFFF ; Latin Extended-G 0x1E000, # .. 0x1E02F ; Glagolitic Supplement 0x1E030, # .. 0x1E08F ; Cyrillic Extended-D 0x1E090, # .. 0x1E0FF ; No_Block 0x1E100, # .. 0x1E14F ; Nyiakeng Puachue Hmong 0x1E150, # .. 0x1E28F ; No_Block 0x1E290, # .. 0x1E2BF ; Toto 0x1E2C0, # .. 0x1E2FF ; Wancho 0x1E300, # .. 0x1E4CF ; No_Block 0x1E4D0, # .. 0x1E4FF ; Nag Mundari 0x1E500, # .. 0x1E7DF ; No_Block 0x1E7E0, # .. 0x1E7FF ; Ethiopic Extended-B 0x1E800, # .. 0x1E8DF ; Mende Kikakui 0x1E8E0, # .. 0x1E8FF ; No_Block 0x1E900, # .. 0x1E95F ; Adlam 0x1E960, # .. 0x1EC6F ; No_Block 0x1EC70, # .. 0x1ECBF ; Indic Siyaq Numbers 0x1ECC0, # .. 0x1ECFF ; No_Block 0x1ED00, # .. 0x1ED4F ; Ottoman Siyaq Numbers 0x1ED50, # .. 0x1EDFF ; No_Block 0x1EE00, # .. 0x1EEFF ; Arabic Mathematical Alphabetic Symbols 0x1EF00, # .. 0x1EFFF ; No_Block 0x1F000, # .. 0x1F02F ; Mahjong Tiles 0x1F030, # .. 0x1F09F ; Domino Tiles 0x1F0A0, # .. 0x1F0FF ; Playing Cards 0x1F100, # .. 0x1F1FF ; Enclosed Alphanumeric Supplement 0x1F200, # .. 0x1F2FF ; Enclosed Ideographic Supplement 0x1F300, # .. 0x1F5FF ; Miscellaneous Symbols and Pictographs 0x1F600, # .. 0x1F64F ; Emoticons 0x1F650, # .. 0x1F67F ; Ornamental Dingbats 0x1F680, # .. 0x1F6FF ; Transport and Map Symbols 0x1F700, # .. 0x1F77F ; Alchemical Symbols 0x1F780, # .. 0x1F7FF ; Geometric Shapes Extended 0x1F800, # .. 
0x1F8FF ; Supplemental Arrows-C 0x1F900, # .. 0x1F9FF ; Supplemental Symbols and Pictographs 0x1FA00, # .. 0x1FA6F ; Chess Symbols 0x1FA70, # .. 0x1FAFF ; Symbols and Pictographs Extended-A 0x1FB00, # .. 0x1FBFF ; Symbols for Legacy Computing 0x1FC00, # .. 0x1FFFF ; No_Block 0x20000, # .. 0x2A6DF ; CJK Unified Ideographs Extension B 0x2A6E0, # .. 0x2A6FF ; No_Block 0x2A700, # .. 0x2B73F ; CJK Unified Ideographs Extension C 0x2B740, # .. 0x2B81F ; CJK Unified Ideographs Extension D 0x2B820, # .. 0x2CEAF ; CJK Unified Ideographs Extension E 0x2CEB0, # .. 0x2EBEF ; CJK Unified Ideographs Extension F 0x2EBF0, # .. 0x2F7FF ; No_Block 0x2F800, # .. 0x2FA1F ; CJK Compatibility Ideographs Supplement 0x2FA20, # .. 0x2FFFF ; No_Block 0x30000, # .. 0x3134F ; CJK Unified Ideographs Extension G 0x31350, # .. 0x323AF ; CJK Unified Ideographs Extension H 0x323B0, # .. 0xDFFFF ; No_Block 0xE0000, # .. 0xE007F ; Tags 0xE0080, # .. 0xE00FF ; No_Block 0xE0100, # .. 0xE01EF ; Variation Selectors Supplement 0xE01F0, # .. 0xEFFFF ; No_Block 0xF0000, # .. 0xFFFFF ; Supplementary Private Use Area-A 0x100000, # .. 0x10FFFF ; Supplementary Private Use Area-B ] VALUES = [ "Basic Latin", # 0000..007F "Latin-1 Supplement", # 0080..00FF "Latin Extended-A", # 0100..017F "Latin Extended-B", # 0180..024F "IPA Extensions", # 0250..02AF "Spacing Modifier Letters", # 02B0..02FF "Combining Diacritical Marks", # 0300..036F "Greek and Coptic", # 0370..03FF "Cyrillic", # 0400..04FF "Cyrillic Supplement", # 0500..052F "Armenian", # 0530..058F "Hebrew", # 0590..05FF "Arabic", # 0600..06FF "Syriac", # 0700..074F "Arabic Supplement", # 0750..077F "Thaana", # 0780..07BF "NKo", # 07C0..07FF "Samaritan", # 0800..083F "Mandaic", # 0840..085F "Syriac Supplement", # 0860..086F "Arabic Extended-B", # 0870..089F "Arabic Extended-A", # 08A0..08FF "Devanagari", # 0900..097F "Bengali", # 0980..09FF "Gurmukhi", # 0A00..0A7F "Gujarati", # 0A80..0AFF "Oriya", # 0B00..0B7F "Tamil", # 0B80..0BFF "Telugu", # 0C00..0C7F "Kannada", # 0C80..0CFF "Malayalam", # 0D00..0D7F "Sinhala", # 0D80..0DFF "Thai", # 0E00..0E7F "Lao", # 0E80..0EFF "Tibetan", # 0F00..0FFF "Myanmar", # 1000..109F "Georgian", # 10A0..10FF "Hangul Jamo", # 1100..11FF "Ethiopic", # 1200..137F "Ethiopic Supplement", # 1380..139F "Cherokee", # 13A0..13FF "Unified Canadian Aboriginal Syllabics", # 1400..167F "Ogham", # 1680..169F "Runic", # 16A0..16FF "Tagalog", # 1700..171F "Hanunoo", # 1720..173F "Buhid", # 1740..175F "Tagbanwa", # 1760..177F "Khmer", # 1780..17FF "Mongolian", # 1800..18AF "Unified Canadian Aboriginal Syllabics Extended", # 18B0..18FF "Limbu", # 1900..194F "Tai Le", # 1950..197F "New Tai Lue", # 1980..19DF "Khmer Symbols", # 19E0..19FF "Buginese", # 1A00..1A1F "Tai Tham", # 1A20..1AAF "Combining Diacritical Marks Extended", # 1AB0..1AFF "Balinese", # 1B00..1B7F "Sundanese", # 1B80..1BBF "Batak", # 1BC0..1BFF "Lepcha", # 1C00..1C4F "Ol Chiki", # 1C50..1C7F "Cyrillic Extended-C", # 1C80..1C8F "Georgian Extended", # 1C90..1CBF "Sundanese Supplement", # 1CC0..1CCF "Vedic Extensions", # 1CD0..1CFF "Phonetic Extensions", # 1D00..1D7F "Phonetic Extensions Supplement", # 1D80..1DBF "Combining Diacritical Marks Supplement", # 1DC0..1DFF "Latin Extended Additional", # 1E00..1EFF "Greek Extended", # 1F00..1FFF "General Punctuation", # 2000..206F "Superscripts and Subscripts", # 2070..209F "Currency Symbols", # 20A0..20CF "Combining Diacritical Marks for Symbols", # 20D0..20FF "Letterlike Symbols", # 2100..214F "Number Forms", # 2150..218F "Arrows", # 2190..21FF "Mathematical 
Operators", # 2200..22FF "Miscellaneous Technical", # 2300..23FF "Control Pictures", # 2400..243F "Optical Character Recognition", # 2440..245F "Enclosed Alphanumerics", # 2460..24FF "Box Drawing", # 2500..257F "Block Elements", # 2580..259F "Geometric Shapes", # 25A0..25FF "Miscellaneous Symbols", # 2600..26FF "Dingbats", # 2700..27BF "Miscellaneous Mathematical Symbols-A", # 27C0..27EF "Supplemental Arrows-A", # 27F0..27FF "Braille Patterns", # 2800..28FF "Supplemental Arrows-B", # 2900..297F "Miscellaneous Mathematical Symbols-B", # 2980..29FF "Supplemental Mathematical Operators", # 2A00..2AFF "Miscellaneous Symbols and Arrows", # 2B00..2BFF "Glagolitic", # 2C00..2C5F "Latin Extended-C", # 2C60..2C7F "Coptic", # 2C80..2CFF "Georgian Supplement", # 2D00..2D2F "Tifinagh", # 2D30..2D7F "Ethiopic Extended", # 2D80..2DDF "Cyrillic Extended-A", # 2DE0..2DFF "Supplemental Punctuation", # 2E00..2E7F "CJK Radicals Supplement", # 2E80..2EFF "Kangxi Radicals", # 2F00..2FDF "No_Block", # 2FE0..2FEF "Ideographic Description Characters", # 2FF0..2FFF "CJK Symbols and Punctuation", # 3000..303F "Hiragana", # 3040..309F "Katakana", # 30A0..30FF "Bopomofo", # 3100..312F "Hangul Compatibility Jamo", # 3130..318F "Kanbun", # 3190..319F "Bopomofo Extended", # 31A0..31BF "CJK Strokes", # 31C0..31EF "Katakana Phonetic Extensions", # 31F0..31FF "Enclosed CJK Letters and Months", # 3200..32FF "CJK Compatibility", # 3300..33FF "CJK Unified Ideographs Extension A", # 3400..4DBF "Yijing Hexagram Symbols", # 4DC0..4DFF "CJK Unified Ideographs", # 4E00..9FFF "Yi Syllables", # A000..A48F "Yi Radicals", # A490..A4CF "Lisu", # A4D0..A4FF "Vai", # A500..A63F "Cyrillic Extended-B", # A640..A69F "Bamum", # A6A0..A6FF "Modifier Tone Letters", # A700..A71F "Latin Extended-D", # A720..A7FF "Syloti Nagri", # A800..A82F "Common Indic Number Forms", # A830..A83F "Phags-pa", # A840..A87F "Saurashtra", # A880..A8DF "Devanagari Extended", # A8E0..A8FF "Kayah Li", # A900..A92F "Rejang", # A930..A95F "Hangul Jamo Extended-A", # A960..A97F "Javanese", # A980..A9DF "Myanmar Extended-B", # A9E0..A9FF "Cham", # AA00..AA5F "Myanmar Extended-A", # AA60..AA7F "Tai Viet", # AA80..AADF "Meetei Mayek Extensions", # AAE0..AAFF "Ethiopic Extended-A", # AB00..AB2F "Latin Extended-E", # AB30..AB6F "Cherokee Supplement", # AB70..ABBF "Meetei Mayek", # ABC0..ABFF "Hangul Syllables", # AC00..D7AF "Hangul Jamo Extended-B", # D7B0..D7FF "High Surrogates", # D800..DB7F "High Private Use Surrogates", # DB80..DBFF "Low Surrogates", # DC00..DFFF "Private Use Area", # E000..F8FF "CJK Compatibility Ideographs", # F900..FAFF "Alphabetic Presentation Forms", # FB00..FB4F "Arabic Presentation Forms-A", # FB50..FDFF "Variation Selectors", # FE00..FE0F "Vertical Forms", # FE10..FE1F "Combining Half Marks", # FE20..FE2F "CJK Compatibility Forms", # FE30..FE4F "Small Form Variants", # FE50..FE6F "Arabic Presentation Forms-B", # FE70..FEFF "Halfwidth and Fullwidth Forms", # FF00..FFEF "Specials", # FFF0..FFFF "Linear B Syllabary", # 10000..1007F "Linear B Ideograms", # 10080..100FF "Aegean Numbers", # 10100..1013F "Ancient Greek Numbers", # 10140..1018F "Ancient Symbols", # 10190..101CF "Phaistos Disc", # 101D0..101FF "No_Block", # 10200..1027F "Lycian", # 10280..1029F "Carian", # 102A0..102DF "Coptic Epact Numbers", # 102E0..102FF "Old Italic", # 10300..1032F "Gothic", # 10330..1034F "Old Permic", # 10350..1037F "Ugaritic", # 10380..1039F "Old Persian", # 103A0..103DF "No_Block", # 103E0..103FF "Deseret", # 10400..1044F "Shavian", # 10450..1047F "Osmanya", # 
10480..104AF "Osage", # 104B0..104FF "Elbasan", # 10500..1052F "Caucasian Albanian", # 10530..1056F "Vithkuqi", # 10570..105BF "No_Block", # 105C0..105FF "Linear A", # 10600..1077F "Latin Extended-F", # 10780..107BF "No_Block", # 107C0..107FF "Cypriot Syllabary", # 10800..1083F "Imperial Aramaic", # 10840..1085F "Palmyrene", # 10860..1087F "Nabataean", # 10880..108AF "No_Block", # 108B0..108DF "Hatran", # 108E0..108FF "Phoenician", # 10900..1091F "Lydian", # 10920..1093F "No_Block", # 10940..1097F "Meroitic Hieroglyphs", # 10980..1099F "Meroitic Cursive", # 109A0..109FF "Kharoshthi", # 10A00..10A5F "Old South Arabian", # 10A60..10A7F "Old North Arabian", # 10A80..10A9F "No_Block", # 10AA0..10ABF "Manichaean", # 10AC0..10AFF "Avestan", # 10B00..10B3F "Inscriptional Parthian", # 10B40..10B5F "Inscriptional Pahlavi", # 10B60..10B7F "Psalter Pahlavi", # 10B80..10BAF "No_Block", # 10BB0..10BFF "Old Turkic", # 10C00..10C4F "No_Block", # 10C50..10C7F "Old Hungarian", # 10C80..10CFF "Hanifi Rohingya", # 10D00..10D3F "No_Block", # 10D40..10E5F "Rumi Numeral Symbols", # 10E60..10E7F "Yezidi", # 10E80..10EBF "Arabic Extended-C", # 10EC0..10EFF "Old Sogdian", # 10F00..10F2F "Sogdian", # 10F30..10F6F "Old Uyghur", # 10F70..10FAF "Chorasmian", # 10FB0..10FDF "Elymaic", # 10FE0..10FFF "Brahmi", # 11000..1107F "Kaithi", # 11080..110CF "Sora Sompeng", # 110D0..110FF "Chakma", # 11100..1114F "Mahajani", # 11150..1117F "Sharada", # 11180..111DF "Sinhala Archaic Numbers", # 111E0..111FF "Khojki", # 11200..1124F "No_Block", # 11250..1127F "Multani", # 11280..112AF "Khudawadi", # 112B0..112FF "Grantha", # 11300..1137F "No_Block", # 11380..113FF "Newa", # 11400..1147F "Tirhuta", # 11480..114DF "No_Block", # 114E0..1157F "Siddham", # 11580..115FF "Modi", # 11600..1165F "Mongolian Supplement", # 11660..1167F "Takri", # 11680..116CF "No_Block", # 116D0..116FF "Ahom", # 11700..1174F "No_Block", # 11750..117FF "Dogra", # 11800..1184F "No_Block", # 11850..1189F "Warang Citi", # 118A0..118FF "Dives Akuru", # 11900..1195F "No_Block", # 11960..1199F "Nandinagari", # 119A0..119FF "Zanabazar Square", # 11A00..11A4F "Soyombo", # 11A50..11AAF "Unified Canadian Aboriginal Syllabics Extended-A", # 11AB0..11ABF "Pau Cin Hau", # 11AC0..11AFF "Devanagari Extended-A", # 11B00..11B5F "No_Block", # 11B60..11BFF "Bhaiksuki", # 11C00..11C6F "Marchen", # 11C70..11CBF "No_Block", # 11CC0..11CFF "Masaram Gondi", # 11D00..11D5F "Gunjala Gondi", # 11D60..11DAF "No_Block", # 11DB0..11EDF "Makasar", # 11EE0..11EFF "Kawi", # 11F00..11F5F "No_Block", # 11F60..11FAF "Lisu Supplement", # 11FB0..11FBF "Tamil Supplement", # 11FC0..11FFF "Cuneiform", # 12000..123FF "Cuneiform Numbers and Punctuation", # 12400..1247F "Early Dynastic Cuneiform", # 12480..1254F "No_Block", # 12550..12F8F "Cypro-Minoan", # 12F90..12FFF "Egyptian Hieroglyphs", # 13000..1342F "Egyptian Hieroglyph Format Controls", # 13430..1345F "No_Block", # 13460..143FF "Anatolian Hieroglyphs", # 14400..1467F "No_Block", # 14680..167FF "Bamum Supplement", # 16800..16A3F "Mro", # 16A40..16A6F "Tangsa", # 16A70..16ACF "Bassa Vah", # 16AD0..16AFF "Pahawh Hmong", # 16B00..16B8F "No_Block", # 16B90..16E3F "Medefaidrin", # 16E40..16E9F "No_Block", # 16EA0..16EFF "Miao", # 16F00..16F9F "No_Block", # 16FA0..16FDF "Ideographic Symbols and Punctuation", # 16FE0..16FFF "Tangut", # 17000..187FF "Tangut Components", # 18800..18AFF "Khitan Small Script", # 18B00..18CFF "Tangut Supplement", # 18D00..18D7F "No_Block", # 18D80..1AFEF "Kana Extended-B", # 1AFF0..1AFFF "Kana Supplement", # 1B000..1B0FF 
"Kana Extended-A", # 1B100..1B12F "Small Kana Extension", # 1B130..1B16F "Nushu", # 1B170..1B2FF "No_Block", # 1B300..1BBFF "Duployan", # 1BC00..1BC9F "Shorthand Format Controls", # 1BCA0..1BCAF "No_Block", # 1BCB0..1CEFF "Znamenny Musical Notation", # 1CF00..1CFCF "No_Block", # 1CFD0..1CFFF "Byzantine Musical Symbols", # 1D000..1D0FF "Musical Symbols", # 1D100..1D1FF "Ancient Greek Musical Notation", # 1D200..1D24F "No_Block", # 1D250..1D2BF "Kaktovik Numerals", # 1D2C0..1D2DF "Mayan Numerals", # 1D2E0..1D2FF "Tai Xuan Jing Symbols", # 1D300..1D35F "Counting Rod Numerals", # 1D360..1D37F "No_Block", # 1D380..1D3FF "Mathematical Alphanumeric Symbols", # 1D400..1D7FF "Sutton SignWriting", # 1D800..1DAAF "No_Block", # 1DAB0..1DEFF "Latin Extended-G", # 1DF00..1DFFF "Glagolitic Supplement", # 1E000..1E02F "Cyrillic Extended-D", # 1E030..1E08F "No_Block", # 1E090..1E0FF "Nyiakeng Puachue Hmong", # 1E100..1E14F "No_Block", # 1E150..1E28F "Toto", # 1E290..1E2BF "Wancho", # 1E2C0..1E2FF "No_Block", # 1E300..1E4CF "Nag Mundari", # 1E4D0..1E4FF "No_Block", # 1E500..1E7DF "Ethiopic Extended-B", # 1E7E0..1E7FF "Mende Kikakui", # 1E800..1E8DF "No_Block", # 1E8E0..1E8FF "Adlam", # 1E900..1E95F "No_Block", # 1E960..1EC6F "Indic Siyaq Numbers", # 1EC70..1ECBF "No_Block", # 1ECC0..1ECFF "Ottoman Siyaq Numbers", # 1ED00..1ED4F "No_Block", # 1ED50..1EDFF "Arabic Mathematical Alphabetic Symbols", # 1EE00..1EEFF "No_Block", # 1EF00..1EFFF "Mahjong Tiles", # 1F000..1F02F "Domino Tiles", # 1F030..1F09F "Playing Cards", # 1F0A0..1F0FF "Enclosed Alphanumeric Supplement", # 1F100..1F1FF "Enclosed Ideographic Supplement", # 1F200..1F2FF "Miscellaneous Symbols and Pictographs", # 1F300..1F5FF "Emoticons", # 1F600..1F64F "Ornamental Dingbats", # 1F650..1F67F "Transport and Map Symbols", # 1F680..1F6FF "Alchemical Symbols", # 1F700..1F77F "Geometric Shapes Extended", # 1F780..1F7FF "Supplemental Arrows-C", # 1F800..1F8FF "Supplemental Symbols and Pictographs", # 1F900..1F9FF "Chess Symbols", # 1FA00..1FA6F "Symbols and Pictographs Extended-A", # 1FA70..1FAFF "Symbols for Legacy Computing", # 1FB00..1FBFF "No_Block", # 1FC00..1FFFF "CJK Unified Ideographs Extension B", # 20000..2A6DF "No_Block", # 2A6E0..2A6FF "CJK Unified Ideographs Extension C", # 2A700..2B73F "CJK Unified Ideographs Extension D", # 2B740..2B81F "CJK Unified Ideographs Extension E", # 2B820..2CEAF "CJK Unified Ideographs Extension F", # 2CEB0..2EBEF "No_Block", # 2EBF0..2F7FF "CJK Compatibility Ideographs Supplement", # 2F800..2FA1F "No_Block", # 2FA20..2FFFF "CJK Unified Ideographs Extension G", # 30000..3134F "CJK Unified Ideographs Extension H", # 31350..323AF "No_Block", # 323B0..DFFFF "Tags", # E0000..E007F "No_Block", # E0080..E00FF "Variation Selectors Supplement", # E0100..E01EF "No_Block", # E01F0..EFFFF "Supplementary Private Use Area-A", # F0000..FFFFF "Supplementary Private Use Area-B", # 100000..10FFFF ] PKaZZZR�~��fontTools/unicodedata/OTTags.py# Data updated to OpenType 1.8.2 as of January 2018. # Complete list of OpenType script tags at: # https://www.microsoft.com/typography/otspec/scripttags.htm # Most of the script tags are the same as the ISO 15924 tag but lowercased, # so we only have to handle the exceptional cases: # - KATAKANA and HIRAGANA both map to 'kana'; # - spaces at the end are preserved, unlike ISO 15924; # - we map special script codes for Inherited, Common and Unknown to DFLT. 
DEFAULT_SCRIPT = "DFLT"

SCRIPT_ALIASES = {
    "jamo": "hang",
}

SCRIPT_EXCEPTIONS = {
    "Hira": "kana",
    "Hrkt": "kana",
    "Laoo": "lao ",
    "Yiii": "yi  ",
    "Nkoo": "nko ",
    "Vaii": "vai ",
    "Zmth": "math",
    "Zinh": DEFAULT_SCRIPT,
    "Zyyy": DEFAULT_SCRIPT,
    "Zzzz": DEFAULT_SCRIPT,
}

SCRIPT_EXCEPTIONS_REVERSED = {
    "math": "Zmth",
}

NEW_SCRIPT_TAGS = {
    "Beng": ("bng2",),
    "Deva": ("dev2",),
    "Gujr": ("gjr2",),
    "Guru": ("gur2",),
    "Knda": ("knd2",),
    "Mlym": ("mlm2",),
    "Orya": ("ory2",),
    "Taml": ("tml2",),
    "Telu": ("tel2",),
    "Mymr": ("mym2",),
}

NEW_SCRIPT_TAGS_REVERSED = {
    value: key for key, values in NEW_SCRIPT_TAGS.items() for value in values
}

fontTools/unicodedata/ScriptExtensions.py

# -*- coding: utf-8 -*-
#
# NOTE: This file was auto-generated with MetaTools/buildUCD.py.
# Source: https://unicode.org/Public/UNIDATA/ScriptExtensions.txt
# License: http://unicode.org/copyright.html#License
#
# ScriptExtensions-15.0.0.txt
# Date: 2022-02-02, 00:57:11 GMT
# © 2022 Unicode®, Inc.
# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.
# For terms of use, see https://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
# For documentation, see https://www.unicode.org/reports/tr44/
#
# The Script_Extensions property indicates which characters are commonly used
# with more than one script, but with a limited number of scripts.
# For each code point, there is one or more property values. Each such value is a Script property value.
# For more information, see:
# UAX #24, Unicode Script Property: https://www.unicode.org/reports/tr24/
# Especially the sections:
# https://www.unicode.org/reports/tr24/#Assignment_Script_Values
# https://www.unicode.org/reports/tr24/#Assignment_ScriptX_Values
#
# Each Script_Extensions value in this file consists of a set
# of one or more abbreviated Script property values. The ordering of the
# values in that set is not material, but for stability in presentation
# it is given here as alphabetical.
#
# The Script_Extensions values are presented in sorted order in the file.
# They are sorted first by the number of Script property values in their sets,
# and then alphabetically by first differing Script property value.
#
# Following each distinct Script_Extensions value is the list of code
# points associated with that value, listed in code point order.
#
# All code points not explicitly listed for Script_Extensions
# have as their value the corresponding Script property value
#
# @missing: 0000..10FFFF; <script>

RANGES = [
    0x0000,  # .. 0x0341 ; None
    0x0342,  # .. 0x0342 ; {'Grek'}
    0x0343,  # .. 0x0344 ; None
    0x0345,  # .. 0x0345 ; {'Grek'}
    0x0346,  # .. 0x0362 ; None
    0x0363,  # .. 0x036F ; {'Latn'}
    0x0370,  # .. 0x0482 ; None
    0x0483,  # .. 0x0483 ; {'Cyrl', 'Perm'}
    0x0484,  # .. 0x0484 ; {'Cyrl', 'Glag'}
    0x0485,  # .. 0x0486 ; {'Cyrl', 'Latn'}
    0x0487,  # .. 0x0487 ; {'Cyrl', 'Glag'}
    0x0488,  # .. 0x060B ; None
    0x060C,  # .. 0x060C ; {'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
    0x060D,  # .. 0x061A ; None
    0x061B,  # .. 0x061B ; {'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
    0x061C,  # .. 0x061C ; {'Arab', 'Syrc', 'Thaa'}
    0x061D,  # .. 0x061E ; None
    0x061F,  # .. 0x061F ; {'Adlm', 'Arab', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}
    0x0620,  # .. 0x063F ; None
    0x0640,  # .. 0x0640 ; {'Adlm', 'Arab', 'Mand', 'Mani', 'Ougr', 'Phlp', 'Rohg', 'Sogd', 'Syrc'}
    0x0641,  # .. 0x064A ; None
    0x064B,  # .. 0x0655 ; {'Arab', 'Syrc'}
    0x0656,  # .. 0x065F ; None
    0x0660,  # .. 0x0669 ; {'Arab', 'Thaa', 'Yezi'}
    0x066A,  # ..
0x066F ; None 0x0670, # .. 0x0670 ; {'Arab', 'Syrc'} 0x0671, # .. 0x06D3 ; None 0x06D4, # .. 0x06D4 ; {'Arab', 'Rohg'} 0x06D5, # .. 0x0950 ; None 0x0951, # .. 0x0951 ; {'Beng', 'Deva', 'Gran', 'Gujr', 'Guru', 'Knda', 'Latn', 'Mlym', 'Orya', 'Shrd', 'Taml', 'Telu', 'Tirh'} 0x0952, # .. 0x0952 ; {'Beng', 'Deva', 'Gran', 'Gujr', 'Guru', 'Knda', 'Latn', 'Mlym', 'Orya', 'Taml', 'Telu', 'Tirh'} 0x0953, # .. 0x0963 ; None 0x0964, # .. 0x0964 ; {'Beng', 'Deva', 'Dogr', 'Gong', 'Gonm', 'Gran', 'Gujr', 'Guru', 'Knda', 'Mahj', 'Mlym', 'Nand', 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml', 'Telu', 'Tirh'} 0x0965, # .. 0x0965 ; {'Beng', 'Deva', 'Dogr', 'Gong', 'Gonm', 'Gran', 'Gujr', 'Guru', 'Knda', 'Limb', 'Mahj', 'Mlym', 'Nand', 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml', 'Telu', 'Tirh'} 0x0966, # .. 0x096F ; {'Deva', 'Dogr', 'Kthi', 'Mahj'} 0x0970, # .. 0x09E5 ; None 0x09E6, # .. 0x09EF ; {'Beng', 'Cakm', 'Sylo'} 0x09F0, # .. 0x0A65 ; None 0x0A66, # .. 0x0A6F ; {'Guru', 'Mult'} 0x0A70, # .. 0x0AE5 ; None 0x0AE6, # .. 0x0AEF ; {'Gujr', 'Khoj'} 0x0AF0, # .. 0x0BE5 ; None 0x0BE6, # .. 0x0BF3 ; {'Gran', 'Taml'} 0x0BF4, # .. 0x0CE5 ; None 0x0CE6, # .. 0x0CEF ; {'Knda', 'Nand'} 0x0CF0, # .. 0x103F ; None 0x1040, # .. 0x1049 ; {'Cakm', 'Mymr', 'Tale'} 0x104A, # .. 0x10FA ; None 0x10FB, # .. 0x10FB ; {'Geor', 'Latn'} 0x10FC, # .. 0x1734 ; None 0x1735, # .. 0x1736 ; {'Buhd', 'Hano', 'Tagb', 'Tglg'} 0x1737, # .. 0x1801 ; None 0x1802, # .. 0x1803 ; {'Mong', 'Phag'} 0x1804, # .. 0x1804 ; None 0x1805, # .. 0x1805 ; {'Mong', 'Phag'} 0x1806, # .. 0x1CCF ; None 0x1CD0, # .. 0x1CD0 ; {'Beng', 'Deva', 'Gran', 'Knda'} 0x1CD1, # .. 0x1CD1 ; {'Deva'} 0x1CD2, # .. 0x1CD2 ; {'Beng', 'Deva', 'Gran', 'Knda'} 0x1CD3, # .. 0x1CD3 ; {'Deva', 'Gran'} 0x1CD4, # .. 0x1CD4 ; {'Deva'} 0x1CD5, # .. 0x1CD6 ; {'Beng', 'Deva'} 0x1CD7, # .. 0x1CD7 ; {'Deva', 'Shrd'} 0x1CD8, # .. 0x1CD8 ; {'Beng', 'Deva'} 0x1CD9, # .. 0x1CD9 ; {'Deva', 'Shrd'} 0x1CDA, # .. 0x1CDA ; {'Deva', 'Knda', 'Mlym', 'Orya', 'Taml', 'Telu'} 0x1CDB, # .. 0x1CDB ; {'Deva'} 0x1CDC, # .. 0x1CDD ; {'Deva', 'Shrd'} 0x1CDE, # .. 0x1CDF ; {'Deva'} 0x1CE0, # .. 0x1CE0 ; {'Deva', 'Shrd'} 0x1CE1, # .. 0x1CE1 ; {'Beng', 'Deva'} 0x1CE2, # .. 0x1CE8 ; {'Deva'} 0x1CE9, # .. 0x1CE9 ; {'Deva', 'Nand'} 0x1CEA, # .. 0x1CEA ; {'Beng', 'Deva'} 0x1CEB, # .. 0x1CEC ; {'Deva'} 0x1CED, # .. 0x1CED ; {'Beng', 'Deva'} 0x1CEE, # .. 0x1CF1 ; {'Deva'} 0x1CF2, # .. 0x1CF2 ; {'Beng', 'Deva', 'Gran', 'Knda', 'Nand', 'Orya', 'Telu', 'Tirh'} 0x1CF3, # .. 0x1CF3 ; {'Deva', 'Gran'} 0x1CF4, # .. 0x1CF4 ; {'Deva', 'Gran', 'Knda'} 0x1CF5, # .. 0x1CF6 ; {'Beng', 'Deva'} 0x1CF7, # .. 0x1CF7 ; {'Beng'} 0x1CF8, # .. 0x1CF9 ; {'Deva', 'Gran'} 0x1CFA, # .. 0x1CFA ; {'Nand'} 0x1CFB, # .. 0x1DBF ; None 0x1DC0, # .. 0x1DC1 ; {'Grek'} 0x1DC2, # .. 0x1DF7 ; None 0x1DF8, # .. 0x1DF8 ; {'Cyrl', 'Syrc'} 0x1DF9, # .. 0x1DF9 ; None 0x1DFA, # .. 0x1DFA ; {'Syrc'} 0x1DFB, # .. 0x202E ; None 0x202F, # .. 0x202F ; {'Latn', 'Mong'} 0x2030, # .. 0x20EF ; None 0x20F0, # .. 0x20F0 ; {'Deva', 'Gran', 'Latn'} 0x20F1, # .. 0x2E42 ; None 0x2E43, # .. 0x2E43 ; {'Cyrl', 'Glag'} 0x2E44, # .. 0x3000 ; None 0x3001, # .. 0x3002 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'} 0x3003, # .. 0x3003 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'} 0x3004, # .. 0x3005 ; None 0x3006, # .. 0x3006 ; {'Hani'} 0x3007, # .. 0x3007 ; None 0x3008, # .. 0x3011 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'} 0x3012, # .. 0x3012 ; None 0x3013, # .. 0x3013 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'} 0x3014, # .. 
0x301B ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'} 0x301C, # .. 0x301F ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'} 0x3020, # .. 0x3029 ; None 0x302A, # .. 0x302D ; {'Bopo', 'Hani'} 0x302E, # .. 0x302F ; None 0x3030, # .. 0x3030 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'} 0x3031, # .. 0x3035 ; {'Hira', 'Kana'} 0x3036, # .. 0x3036 ; None 0x3037, # .. 0x3037 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'} 0x3038, # .. 0x303B ; None 0x303C, # .. 0x303D ; {'Hani', 'Hira', 'Kana'} 0x303E, # .. 0x303F ; {'Hani'} 0x3040, # .. 0x3098 ; None 0x3099, # .. 0x309C ; {'Hira', 'Kana'} 0x309D, # .. 0x309F ; None 0x30A0, # .. 0x30A0 ; {'Hira', 'Kana'} 0x30A1, # .. 0x30FA ; None 0x30FB, # .. 0x30FB ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'} 0x30FC, # .. 0x30FC ; {'Hira', 'Kana'} 0x30FD, # .. 0x318F ; None 0x3190, # .. 0x319F ; {'Hani'} 0x31A0, # .. 0x31BF ; None 0x31C0, # .. 0x31E3 ; {'Hani'} 0x31E4, # .. 0x321F ; None 0x3220, # .. 0x3247 ; {'Hani'} 0x3248, # .. 0x327F ; None 0x3280, # .. 0x32B0 ; {'Hani'} 0x32B1, # .. 0x32BF ; None 0x32C0, # .. 0x32CB ; {'Hani'} 0x32CC, # .. 0x32FE ; None 0x32FF, # .. 0x32FF ; {'Hani'} 0x3300, # .. 0x3357 ; None 0x3358, # .. 0x3370 ; {'Hani'} 0x3371, # .. 0x337A ; None 0x337B, # .. 0x337F ; {'Hani'} 0x3380, # .. 0x33DF ; None 0x33E0, # .. 0x33FE ; {'Hani'} 0x33FF, # .. 0xA66E ; None 0xA66F, # .. 0xA66F ; {'Cyrl', 'Glag'} 0xA670, # .. 0xA6FF ; None 0xA700, # .. 0xA707 ; {'Hani', 'Latn'} 0xA708, # .. 0xA82F ; None 0xA830, # .. 0xA832 ; {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Knda', 'Kthi', 'Mahj', 'Mlym', 'Modi', 'Nand', 'Sind', 'Takr', 'Tirh'} 0xA833, # .. 0xA835 ; {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Knda', 'Kthi', 'Mahj', 'Modi', 'Nand', 'Sind', 'Takr', 'Tirh'} 0xA836, # .. 0xA839 ; {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Kthi', 'Mahj', 'Modi', 'Sind', 'Takr', 'Tirh'} 0xA83A, # .. 0xA8F0 ; None 0xA8F1, # .. 0xA8F1 ; {'Beng', 'Deva'} 0xA8F2, # .. 0xA8F2 ; None 0xA8F3, # .. 0xA8F3 ; {'Deva', 'Taml'} 0xA8F4, # .. 0xA92D ; None 0xA92E, # .. 0xA92E ; {'Kali', 'Latn', 'Mymr'} 0xA92F, # .. 0xA9CE ; None 0xA9CF, # .. 0xA9CF ; {'Bugi', 'Java'} 0xA9D0, # .. 0xFD3D ; None 0xFD3E, # .. 0xFD3F ; {'Arab', 'Nkoo'} 0xFD40, # .. 0xFDF1 ; None 0xFDF2, # .. 0xFDF2 ; {'Arab', 'Thaa'} 0xFDF3, # .. 0xFDFC ; None 0xFDFD, # .. 0xFDFD ; {'Arab', 'Thaa'} 0xFDFE, # .. 0xFE44 ; None 0xFE45, # .. 0xFE46 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'} 0xFE47, # .. 0xFF60 ; None 0xFF61, # .. 0xFF65 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'} 0xFF66, # .. 0xFF6F ; None 0xFF70, # .. 0xFF70 ; {'Hira', 'Kana'} 0xFF71, # .. 0xFF9D ; None 0xFF9E, # .. 0xFF9F ; {'Hira', 'Kana'} 0xFFA0, # .. 0x100FF ; None 0x10100, # .. 0x10101 ; {'Cpmn', 'Cprt', 'Linb'} 0x10102, # .. 0x10102 ; {'Cprt', 'Linb'} 0x10103, # .. 0x10106 ; None 0x10107, # .. 0x10133 ; {'Cprt', 'Lina', 'Linb'} 0x10134, # .. 0x10136 ; None 0x10137, # .. 0x1013F ; {'Cprt', 'Linb'} 0x10140, # .. 0x102DF ; None 0x102E0, # .. 0x102FB ; {'Arab', 'Copt'} 0x102FC, # .. 0x10AF1 ; None 0x10AF2, # .. 0x10AF2 ; {'Mani', 'Ougr'} 0x10AF3, # .. 0x11300 ; None 0x11301, # .. 0x11301 ; {'Gran', 'Taml'} 0x11302, # .. 0x11302 ; None 0x11303, # .. 0x11303 ; {'Gran', 'Taml'} 0x11304, # .. 0x1133A ; None 0x1133B, # .. 0x1133C ; {'Gran', 'Taml'} 0x1133D, # .. 0x11FCF ; None 0x11FD0, # .. 0x11FD1 ; {'Gran', 'Taml'} 0x11FD2, # .. 0x11FD2 ; None 0x11FD3, # .. 0x11FD3 ; {'Gran', 'Taml'} 0x11FD4, # .. 0x1BC9F ; None 0x1BCA0, # .. 0x1BCA3 ; {'Dupl'} 0x1BCA4, # .. 0x1D35F ; None 0x1D360, # .. 0x1D371 ; {'Hani'} 0x1D372, # .. 0x1F24F ; None 0x1F250, # .. 
0x1F251 ; {'Hani'} 0x1F252, # .. 0x10FFFF ; None ] VALUES = [ None, # 0000..0341 {"Grek"}, # 0342..0342 None, # 0343..0344 {"Grek"}, # 0345..0345 None, # 0346..0362 {"Latn"}, # 0363..036F None, # 0370..0482 {"Cyrl", "Perm"}, # 0483..0483 {"Cyrl", "Glag"}, # 0484..0484 {"Cyrl", "Latn"}, # 0485..0486 {"Cyrl", "Glag"}, # 0487..0487 None, # 0488..060B {"Arab", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 060C..060C None, # 060D..061A {"Arab", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 061B..061B {"Arab", "Syrc", "Thaa"}, # 061C..061C None, # 061D..061E {"Adlm", "Arab", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 061F..061F None, # 0620..063F { "Adlm", "Arab", "Mand", "Mani", "Ougr", "Phlp", "Rohg", "Sogd", "Syrc", }, # 0640..0640 None, # 0641..064A {"Arab", "Syrc"}, # 064B..0655 None, # 0656..065F {"Arab", "Thaa", "Yezi"}, # 0660..0669 None, # 066A..066F {"Arab", "Syrc"}, # 0670..0670 None, # 0671..06D3 {"Arab", "Rohg"}, # 06D4..06D4 None, # 06D5..0950 { "Beng", "Deva", "Gran", "Gujr", "Guru", "Knda", "Latn", "Mlym", "Orya", "Shrd", "Taml", "Telu", "Tirh", }, # 0951..0951 { "Beng", "Deva", "Gran", "Gujr", "Guru", "Knda", "Latn", "Mlym", "Orya", "Taml", "Telu", "Tirh", }, # 0952..0952 None, # 0953..0963 { "Beng", "Deva", "Dogr", "Gong", "Gonm", "Gran", "Gujr", "Guru", "Knda", "Mahj", "Mlym", "Nand", "Orya", "Sind", "Sinh", "Sylo", "Takr", "Taml", "Telu", "Tirh", }, # 0964..0964 { "Beng", "Deva", "Dogr", "Gong", "Gonm", "Gran", "Gujr", "Guru", "Knda", "Limb", "Mahj", "Mlym", "Nand", "Orya", "Sind", "Sinh", "Sylo", "Takr", "Taml", "Telu", "Tirh", }, # 0965..0965 {"Deva", "Dogr", "Kthi", "Mahj"}, # 0966..096F None, # 0970..09E5 {"Beng", "Cakm", "Sylo"}, # 09E6..09EF None, # 09F0..0A65 {"Guru", "Mult"}, # 0A66..0A6F None, # 0A70..0AE5 {"Gujr", "Khoj"}, # 0AE6..0AEF None, # 0AF0..0BE5 {"Gran", "Taml"}, # 0BE6..0BF3 None, # 0BF4..0CE5 {"Knda", "Nand"}, # 0CE6..0CEF None, # 0CF0..103F {"Cakm", "Mymr", "Tale"}, # 1040..1049 None, # 104A..10FA {"Geor", "Latn"}, # 10FB..10FB None, # 10FC..1734 {"Buhd", "Hano", "Tagb", "Tglg"}, # 1735..1736 None, # 1737..1801 {"Mong", "Phag"}, # 1802..1803 None, # 1804..1804 {"Mong", "Phag"}, # 1805..1805 None, # 1806..1CCF {"Beng", "Deva", "Gran", "Knda"}, # 1CD0..1CD0 {"Deva"}, # 1CD1..1CD1 {"Beng", "Deva", "Gran", "Knda"}, # 1CD2..1CD2 {"Deva", "Gran"}, # 1CD3..1CD3 {"Deva"}, # 1CD4..1CD4 {"Beng", "Deva"}, # 1CD5..1CD6 {"Deva", "Shrd"}, # 1CD7..1CD7 {"Beng", "Deva"}, # 1CD8..1CD8 {"Deva", "Shrd"}, # 1CD9..1CD9 {"Deva", "Knda", "Mlym", "Orya", "Taml", "Telu"}, # 1CDA..1CDA {"Deva"}, # 1CDB..1CDB {"Deva", "Shrd"}, # 1CDC..1CDD {"Deva"}, # 1CDE..1CDF {"Deva", "Shrd"}, # 1CE0..1CE0 {"Beng", "Deva"}, # 1CE1..1CE1 {"Deva"}, # 1CE2..1CE8 {"Deva", "Nand"}, # 1CE9..1CE9 {"Beng", "Deva"}, # 1CEA..1CEA {"Deva"}, # 1CEB..1CEC {"Beng", "Deva"}, # 1CED..1CED {"Deva"}, # 1CEE..1CF1 {"Beng", "Deva", "Gran", "Knda", "Nand", "Orya", "Telu", "Tirh"}, # 1CF2..1CF2 {"Deva", "Gran"}, # 1CF3..1CF3 {"Deva", "Gran", "Knda"}, # 1CF4..1CF4 {"Beng", "Deva"}, # 1CF5..1CF6 {"Beng"}, # 1CF7..1CF7 {"Deva", "Gran"}, # 1CF8..1CF9 {"Nand"}, # 1CFA..1CFA None, # 1CFB..1DBF {"Grek"}, # 1DC0..1DC1 None, # 1DC2..1DF7 {"Cyrl", "Syrc"}, # 1DF8..1DF8 None, # 1DF9..1DF9 {"Syrc"}, # 1DFA..1DFA None, # 1DFB..202E {"Latn", "Mong"}, # 202F..202F None, # 2030..20EF {"Deva", "Gran", "Latn"}, # 20F0..20F0 None, # 20F1..2E42 {"Cyrl", "Glag"}, # 2E43..2E43 None, # 2E44..3000 {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # 3001..3002 {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3003..3003 None, # 3004..3005 {"Hani"}, # 
3006..3006
    None,  # 3007..3007
    {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"},  # 3008..3011
    None,  # 3012..3012
    {"Bopo", "Hang", "Hani", "Hira", "Kana"},  # 3013..3013
    {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"},  # 3014..301B
    {"Bopo", "Hang", "Hani", "Hira", "Kana"},  # 301C..301F
    None,  # 3020..3029
    {"Bopo", "Hani"},  # 302A..302D
    None,  # 302E..302F
    {"Bopo", "Hang", "Hani", "Hira", "Kana"},  # 3030..3030
    {"Hira", "Kana"},  # 3031..3035
    None,  # 3036..3036
    {"Bopo", "Hang", "Hani", "Hira", "Kana"},  # 3037..3037
    None,  # 3038..303B
    {"Hani", "Hira", "Kana"},  # 303C..303D
    {"Hani"},  # 303E..303F
    None,  # 3040..3098
    {"Hira", "Kana"},  # 3099..309C
    None,  # 309D..309F
    {"Hira", "Kana"},  # 30A0..30A0
    None,  # 30A1..30FA
    {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"},  # 30FB..30FB
    {"Hira", "Kana"},  # 30FC..30FC
    None,  # 30FD..318F
    {"Hani"},  # 3190..319F
    None,  # 31A0..31BF
    {"Hani"},  # 31C0..31E3
    None,  # 31E4..321F
    {"Hani"},  # 3220..3247
    None,  # 3248..327F
    {"Hani"},  # 3280..32B0
    None,  # 32B1..32BF
    {"Hani"},  # 32C0..32CB
    None,  # 32CC..32FE
    {"Hani"},  # 32FF..32FF
    None,  # 3300..3357
    {"Hani"},  # 3358..3370
    None,  # 3371..337A
    {"Hani"},  # 337B..337F
    None,  # 3380..33DF
    {"Hani"},  # 33E0..33FE
    None,  # 33FF..A66E
    {"Cyrl", "Glag"},  # A66F..A66F
    None,  # A670..A6FF
    {"Hani", "Latn"},  # A700..A707
    None,  # A708..A82F
    {"Deva", "Dogr", "Gujr", "Guru", "Khoj", "Knda", "Kthi", "Mahj", "Mlym", "Modi", "Nand", "Sind", "Takr", "Tirh"},  # A830..A832
    {"Deva", "Dogr", "Gujr", "Guru", "Khoj", "Knda", "Kthi", "Mahj", "Modi", "Nand", "Sind", "Takr", "Tirh"},  # A833..A835
    {"Deva", "Dogr", "Gujr", "Guru", "Khoj", "Kthi", "Mahj", "Modi", "Sind", "Takr", "Tirh"},  # A836..A839
    None,  # A83A..A8F0
    {"Beng", "Deva"},  # A8F1..A8F1
    None,  # A8F2..A8F2
    {"Deva", "Taml"},  # A8F3..A8F3
    None,  # A8F4..A92D
    {"Kali", "Latn", "Mymr"},  # A92E..A92E
    None,  # A92F..A9CE
    {"Bugi", "Java"},  # A9CF..A9CF
    None,  # A9D0..FD3D
    {"Arab", "Nkoo"},  # FD3E..FD3F
    None,  # FD40..FDF1
    {"Arab", "Thaa"},  # FDF2..FDF2
    None,  # FDF3..FDFC
    {"Arab", "Thaa"},  # FDFD..FDFD
    None,  # FDFE..FE44
    {"Bopo", "Hang", "Hani", "Hira", "Kana"},  # FE45..FE46
    None,  # FE47..FF60
    {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"},  # FF61..FF65
    None,  # FF66..FF6F
    {"Hira", "Kana"},  # FF70..FF70
    None,  # FF71..FF9D
    {"Hira", "Kana"},  # FF9E..FF9F
    None,  # FFA0..100FF
    {"Cpmn", "Cprt", "Linb"},  # 10100..10101
    {"Cprt", "Linb"},  # 10102..10102
    None,  # 10103..10106
    {"Cprt", "Lina", "Linb"},  # 10107..10133
    None,  # 10134..10136
    {"Cprt", "Linb"},  # 10137..1013F
    None,  # 10140..102DF
    {"Arab", "Copt"},  # 102E0..102FB
    None,  # 102FC..10AF1
    {"Mani", "Ougr"},  # 10AF2..10AF2
    None,  # 10AF3..11300
    {"Gran", "Taml"},  # 11301..11301
    None,  # 11302..11302
    {"Gran", "Taml"},  # 11303..11303
    None,  # 11304..1133A
    {"Gran", "Taml"},  # 1133B..1133C
    None,  # 1133D..11FCF
    {"Gran", "Taml"},  # 11FD0..11FD1
    None,  # 11FD2..11FD2
    {"Gran", "Taml"},  # 11FD3..11FD3
    None,  # 11FD4..1BC9F
    {"Dupl"},  # 1BCA0..1BCA3
    None,  # 1BCA4..1D35F
    {"Hani"},  # 1D360..1D371
    None,  # 1D372..1F24F
    {"Hani"},  # 1F250..1F251
    None,  # 1F252..10FFFF
]

fontTools/unicodedata/Scripts.py

# -*- coding: utf-8 -*-
#
# NOTE: This file was auto-generated with MetaTools/buildUCD.py.
# Source: https://unicode.org/Public/UNIDATA/Scripts.txt
# License: http://unicode.org/copyright.html#License
#
# Scripts-15.0.0.txt
# Date: 2022-04-26, 23:15:02 GMT
# © 2022 Unicode®, Inc.
# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.
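# Both ScriptExtensions.py (above) and Scripts.py (below) share the same
# layout: RANGES holds the first code point of each run, sorted ascending,
# and a parallel VALUES list holds the property value for that run, so a
# point lookup is a single binary search. A self-contained sketch of that
# convention follows; the table here is a made-up two-script excerpt, and
# the library's real lookups are fontTools.unicodedata's script() and
# script_extension().
from bisect import bisect_right

_DEMO_RANGES = [0x0000, 0x0041, 0x005B, 0x0061, 0x007B]  # run start points
_DEMO_VALUES = ["Common", "Latin", "Common", "Latin", "Common"]

def _example_property_lookup(code_point):
    # bisect_right returns the insertion point after any equal start value,
    # so the run containing code_point is the one at the previous index.
    return _DEMO_VALUES[bisect_right(_DEMO_RANGES, code_point) - 1]

# _example_property_lookup(ord("A")) -> "Latin"
# _example_property_lookup(ord("!")) -> "Common"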
# For terms of use, see https://www.unicode.org/terms_of_use.html # # Unicode Character Database # For documentation, see https://www.unicode.org/reports/tr44/ # For more information, see: # UAX #24, Unicode Script Property: https://www.unicode.org/reports/tr24/ # Especially the sections: # https://www.unicode.org/reports/tr24/#Assignment_Script_Values # https://www.unicode.org/reports/tr24/#Assignment_ScriptX_Values # RANGES = [ 0x0000, # .. 0x0040 ; Common 0x0041, # .. 0x005A ; Latin 0x005B, # .. 0x0060 ; Common 0x0061, # .. 0x007A ; Latin 0x007B, # .. 0x00A9 ; Common 0x00AA, # .. 0x00AA ; Latin 0x00AB, # .. 0x00B9 ; Common 0x00BA, # .. 0x00BA ; Latin 0x00BB, # .. 0x00BF ; Common 0x00C0, # .. 0x00D6 ; Latin 0x00D7, # .. 0x00D7 ; Common 0x00D8, # .. 0x00F6 ; Latin 0x00F7, # .. 0x00F7 ; Common 0x00F8, # .. 0x02B8 ; Latin 0x02B9, # .. 0x02DF ; Common 0x02E0, # .. 0x02E4 ; Latin 0x02E5, # .. 0x02E9 ; Common 0x02EA, # .. 0x02EB ; Bopomofo 0x02EC, # .. 0x02FF ; Common 0x0300, # .. 0x036F ; Inherited 0x0370, # .. 0x0373 ; Greek 0x0374, # .. 0x0374 ; Common 0x0375, # .. 0x0377 ; Greek 0x0378, # .. 0x0379 ; Unknown 0x037A, # .. 0x037D ; Greek 0x037E, # .. 0x037E ; Common 0x037F, # .. 0x037F ; Greek 0x0380, # .. 0x0383 ; Unknown 0x0384, # .. 0x0384 ; Greek 0x0385, # .. 0x0385 ; Common 0x0386, # .. 0x0386 ; Greek 0x0387, # .. 0x0387 ; Common 0x0388, # .. 0x038A ; Greek 0x038B, # .. 0x038B ; Unknown 0x038C, # .. 0x038C ; Greek 0x038D, # .. 0x038D ; Unknown 0x038E, # .. 0x03A1 ; Greek 0x03A2, # .. 0x03A2 ; Unknown 0x03A3, # .. 0x03E1 ; Greek 0x03E2, # .. 0x03EF ; Coptic 0x03F0, # .. 0x03FF ; Greek 0x0400, # .. 0x0484 ; Cyrillic 0x0485, # .. 0x0486 ; Inherited 0x0487, # .. 0x052F ; Cyrillic 0x0530, # .. 0x0530 ; Unknown 0x0531, # .. 0x0556 ; Armenian 0x0557, # .. 0x0558 ; Unknown 0x0559, # .. 0x058A ; Armenian 0x058B, # .. 0x058C ; Unknown 0x058D, # .. 0x058F ; Armenian 0x0590, # .. 0x0590 ; Unknown 0x0591, # .. 0x05C7 ; Hebrew 0x05C8, # .. 0x05CF ; Unknown 0x05D0, # .. 0x05EA ; Hebrew 0x05EB, # .. 0x05EE ; Unknown 0x05EF, # .. 0x05F4 ; Hebrew 0x05F5, # .. 0x05FF ; Unknown 0x0600, # .. 0x0604 ; Arabic 0x0605, # .. 0x0605 ; Common 0x0606, # .. 0x060B ; Arabic 0x060C, # .. 0x060C ; Common 0x060D, # .. 0x061A ; Arabic 0x061B, # .. 0x061B ; Common 0x061C, # .. 0x061E ; Arabic 0x061F, # .. 0x061F ; Common 0x0620, # .. 0x063F ; Arabic 0x0640, # .. 0x0640 ; Common 0x0641, # .. 0x064A ; Arabic 0x064B, # .. 0x0655 ; Inherited 0x0656, # .. 0x066F ; Arabic 0x0670, # .. 0x0670 ; Inherited 0x0671, # .. 0x06DC ; Arabic 0x06DD, # .. 0x06DD ; Common 0x06DE, # .. 0x06FF ; Arabic 0x0700, # .. 0x070D ; Syriac 0x070E, # .. 0x070E ; Unknown 0x070F, # .. 0x074A ; Syriac 0x074B, # .. 0x074C ; Unknown 0x074D, # .. 0x074F ; Syriac 0x0750, # .. 0x077F ; Arabic 0x0780, # .. 0x07B1 ; Thaana 0x07B2, # .. 0x07BF ; Unknown 0x07C0, # .. 0x07FA ; Nko 0x07FB, # .. 0x07FC ; Unknown 0x07FD, # .. 0x07FF ; Nko 0x0800, # .. 0x082D ; Samaritan 0x082E, # .. 0x082F ; Unknown 0x0830, # .. 0x083E ; Samaritan 0x083F, # .. 0x083F ; Unknown 0x0840, # .. 0x085B ; Mandaic 0x085C, # .. 0x085D ; Unknown 0x085E, # .. 0x085E ; Mandaic 0x085F, # .. 0x085F ; Unknown 0x0860, # .. 0x086A ; Syriac 0x086B, # .. 0x086F ; Unknown 0x0870, # .. 0x088E ; Arabic 0x088F, # .. 0x088F ; Unknown 0x0890, # .. 0x0891 ; Arabic 0x0892, # .. 0x0897 ; Unknown 0x0898, # .. 0x08E1 ; Arabic 0x08E2, # .. 0x08E2 ; Common 0x08E3, # .. 0x08FF ; Arabic 0x0900, # .. 0x0950 ; Devanagari 0x0951, # .. 0x0954 ; Inherited 0x0955, # .. 0x0963 ; Devanagari 0x0964, # .. 
0x0965 ; Common 0x0966, # .. 0x097F ; Devanagari 0x0980, # .. 0x0983 ; Bengali 0x0984, # .. 0x0984 ; Unknown 0x0985, # .. 0x098C ; Bengali 0x098D, # .. 0x098E ; Unknown 0x098F, # .. 0x0990 ; Bengali 0x0991, # .. 0x0992 ; Unknown 0x0993, # .. 0x09A8 ; Bengali 0x09A9, # .. 0x09A9 ; Unknown 0x09AA, # .. 0x09B0 ; Bengali 0x09B1, # .. 0x09B1 ; Unknown 0x09B2, # .. 0x09B2 ; Bengali 0x09B3, # .. 0x09B5 ; Unknown 0x09B6, # .. 0x09B9 ; Bengali 0x09BA, # .. 0x09BB ; Unknown 0x09BC, # .. 0x09C4 ; Bengali 0x09C5, # .. 0x09C6 ; Unknown 0x09C7, # .. 0x09C8 ; Bengali 0x09C9, # .. 0x09CA ; Unknown 0x09CB, # .. 0x09CE ; Bengali 0x09CF, # .. 0x09D6 ; Unknown 0x09D7, # .. 0x09D7 ; Bengali 0x09D8, # .. 0x09DB ; Unknown 0x09DC, # .. 0x09DD ; Bengali 0x09DE, # .. 0x09DE ; Unknown 0x09DF, # .. 0x09E3 ; Bengali 0x09E4, # .. 0x09E5 ; Unknown 0x09E6, # .. 0x09FE ; Bengali 0x09FF, # .. 0x0A00 ; Unknown 0x0A01, # .. 0x0A03 ; Gurmukhi 0x0A04, # .. 0x0A04 ; Unknown 0x0A05, # .. 0x0A0A ; Gurmukhi 0x0A0B, # .. 0x0A0E ; Unknown 0x0A0F, # .. 0x0A10 ; Gurmukhi 0x0A11, # .. 0x0A12 ; Unknown 0x0A13, # .. 0x0A28 ; Gurmukhi 0x0A29, # .. 0x0A29 ; Unknown 0x0A2A, # .. 0x0A30 ; Gurmukhi 0x0A31, # .. 0x0A31 ; Unknown 0x0A32, # .. 0x0A33 ; Gurmukhi 0x0A34, # .. 0x0A34 ; Unknown 0x0A35, # .. 0x0A36 ; Gurmukhi 0x0A37, # .. 0x0A37 ; Unknown 0x0A38, # .. 0x0A39 ; Gurmukhi 0x0A3A, # .. 0x0A3B ; Unknown 0x0A3C, # .. 0x0A3C ; Gurmukhi 0x0A3D, # .. 0x0A3D ; Unknown 0x0A3E, # .. 0x0A42 ; Gurmukhi 0x0A43, # .. 0x0A46 ; Unknown 0x0A47, # .. 0x0A48 ; Gurmukhi 0x0A49, # .. 0x0A4A ; Unknown 0x0A4B, # .. 0x0A4D ; Gurmukhi 0x0A4E, # .. 0x0A50 ; Unknown 0x0A51, # .. 0x0A51 ; Gurmukhi 0x0A52, # .. 0x0A58 ; Unknown 0x0A59, # .. 0x0A5C ; Gurmukhi 0x0A5D, # .. 0x0A5D ; Unknown 0x0A5E, # .. 0x0A5E ; Gurmukhi 0x0A5F, # .. 0x0A65 ; Unknown 0x0A66, # .. 0x0A76 ; Gurmukhi 0x0A77, # .. 0x0A80 ; Unknown 0x0A81, # .. 0x0A83 ; Gujarati 0x0A84, # .. 0x0A84 ; Unknown 0x0A85, # .. 0x0A8D ; Gujarati 0x0A8E, # .. 0x0A8E ; Unknown 0x0A8F, # .. 0x0A91 ; Gujarati 0x0A92, # .. 0x0A92 ; Unknown 0x0A93, # .. 0x0AA8 ; Gujarati 0x0AA9, # .. 0x0AA9 ; Unknown 0x0AAA, # .. 0x0AB0 ; Gujarati 0x0AB1, # .. 0x0AB1 ; Unknown 0x0AB2, # .. 0x0AB3 ; Gujarati 0x0AB4, # .. 0x0AB4 ; Unknown 0x0AB5, # .. 0x0AB9 ; Gujarati 0x0ABA, # .. 0x0ABB ; Unknown 0x0ABC, # .. 0x0AC5 ; Gujarati 0x0AC6, # .. 0x0AC6 ; Unknown 0x0AC7, # .. 0x0AC9 ; Gujarati 0x0ACA, # .. 0x0ACA ; Unknown 0x0ACB, # .. 0x0ACD ; Gujarati 0x0ACE, # .. 0x0ACF ; Unknown 0x0AD0, # .. 0x0AD0 ; Gujarati 0x0AD1, # .. 0x0ADF ; Unknown 0x0AE0, # .. 0x0AE3 ; Gujarati 0x0AE4, # .. 0x0AE5 ; Unknown 0x0AE6, # .. 0x0AF1 ; Gujarati 0x0AF2, # .. 0x0AF8 ; Unknown 0x0AF9, # .. 0x0AFF ; Gujarati 0x0B00, # .. 0x0B00 ; Unknown 0x0B01, # .. 0x0B03 ; Oriya 0x0B04, # .. 0x0B04 ; Unknown 0x0B05, # .. 0x0B0C ; Oriya 0x0B0D, # .. 0x0B0E ; Unknown 0x0B0F, # .. 0x0B10 ; Oriya 0x0B11, # .. 0x0B12 ; Unknown 0x0B13, # .. 0x0B28 ; Oriya 0x0B29, # .. 0x0B29 ; Unknown 0x0B2A, # .. 0x0B30 ; Oriya 0x0B31, # .. 0x0B31 ; Unknown 0x0B32, # .. 0x0B33 ; Oriya 0x0B34, # .. 0x0B34 ; Unknown 0x0B35, # .. 0x0B39 ; Oriya 0x0B3A, # .. 0x0B3B ; Unknown 0x0B3C, # .. 0x0B44 ; Oriya 0x0B45, # .. 0x0B46 ; Unknown 0x0B47, # .. 0x0B48 ; Oriya 0x0B49, # .. 0x0B4A ; Unknown 0x0B4B, # .. 0x0B4D ; Oriya 0x0B4E, # .. 0x0B54 ; Unknown 0x0B55, # .. 0x0B57 ; Oriya 0x0B58, # .. 0x0B5B ; Unknown 0x0B5C, # .. 0x0B5D ; Oriya 0x0B5E, # .. 0x0B5E ; Unknown 0x0B5F, # .. 0x0B63 ; Oriya 0x0B64, # .. 0x0B65 ; Unknown 0x0B66, # .. 0x0B77 ; Oriya 0x0B78, # .. 0x0B81 ; Unknown 0x0B82, # .. 
0x0B83 ; Tamil 0x0B84, # .. 0x0B84 ; Unknown 0x0B85, # .. 0x0B8A ; Tamil 0x0B8B, # .. 0x0B8D ; Unknown 0x0B8E, # .. 0x0B90 ; Tamil 0x0B91, # .. 0x0B91 ; Unknown 0x0B92, # .. 0x0B95 ; Tamil 0x0B96, # .. 0x0B98 ; Unknown 0x0B99, # .. 0x0B9A ; Tamil 0x0B9B, # .. 0x0B9B ; Unknown 0x0B9C, # .. 0x0B9C ; Tamil 0x0B9D, # .. 0x0B9D ; Unknown 0x0B9E, # .. 0x0B9F ; Tamil 0x0BA0, # .. 0x0BA2 ; Unknown 0x0BA3, # .. 0x0BA4 ; Tamil 0x0BA5, # .. 0x0BA7 ; Unknown 0x0BA8, # .. 0x0BAA ; Tamil 0x0BAB, # .. 0x0BAD ; Unknown 0x0BAE, # .. 0x0BB9 ; Tamil 0x0BBA, # .. 0x0BBD ; Unknown 0x0BBE, # .. 0x0BC2 ; Tamil 0x0BC3, # .. 0x0BC5 ; Unknown 0x0BC6, # .. 0x0BC8 ; Tamil 0x0BC9, # .. 0x0BC9 ; Unknown 0x0BCA, # .. 0x0BCD ; Tamil 0x0BCE, # .. 0x0BCF ; Unknown 0x0BD0, # .. 0x0BD0 ; Tamil 0x0BD1, # .. 0x0BD6 ; Unknown 0x0BD7, # .. 0x0BD7 ; Tamil 0x0BD8, # .. 0x0BE5 ; Unknown 0x0BE6, # .. 0x0BFA ; Tamil 0x0BFB, # .. 0x0BFF ; Unknown 0x0C00, # .. 0x0C0C ; Telugu 0x0C0D, # .. 0x0C0D ; Unknown 0x0C0E, # .. 0x0C10 ; Telugu 0x0C11, # .. 0x0C11 ; Unknown 0x0C12, # .. 0x0C28 ; Telugu 0x0C29, # .. 0x0C29 ; Unknown 0x0C2A, # .. 0x0C39 ; Telugu 0x0C3A, # .. 0x0C3B ; Unknown 0x0C3C, # .. 0x0C44 ; Telugu 0x0C45, # .. 0x0C45 ; Unknown 0x0C46, # .. 0x0C48 ; Telugu 0x0C49, # .. 0x0C49 ; Unknown 0x0C4A, # .. 0x0C4D ; Telugu 0x0C4E, # .. 0x0C54 ; Unknown 0x0C55, # .. 0x0C56 ; Telugu 0x0C57, # .. 0x0C57 ; Unknown 0x0C58, # .. 0x0C5A ; Telugu 0x0C5B, # .. 0x0C5C ; Unknown 0x0C5D, # .. 0x0C5D ; Telugu 0x0C5E, # .. 0x0C5F ; Unknown 0x0C60, # .. 0x0C63 ; Telugu 0x0C64, # .. 0x0C65 ; Unknown 0x0C66, # .. 0x0C6F ; Telugu 0x0C70, # .. 0x0C76 ; Unknown 0x0C77, # .. 0x0C7F ; Telugu 0x0C80, # .. 0x0C8C ; Kannada 0x0C8D, # .. 0x0C8D ; Unknown 0x0C8E, # .. 0x0C90 ; Kannada 0x0C91, # .. 0x0C91 ; Unknown 0x0C92, # .. 0x0CA8 ; Kannada 0x0CA9, # .. 0x0CA9 ; Unknown 0x0CAA, # .. 0x0CB3 ; Kannada 0x0CB4, # .. 0x0CB4 ; Unknown 0x0CB5, # .. 0x0CB9 ; Kannada 0x0CBA, # .. 0x0CBB ; Unknown 0x0CBC, # .. 0x0CC4 ; Kannada 0x0CC5, # .. 0x0CC5 ; Unknown 0x0CC6, # .. 0x0CC8 ; Kannada 0x0CC9, # .. 0x0CC9 ; Unknown 0x0CCA, # .. 0x0CCD ; Kannada 0x0CCE, # .. 0x0CD4 ; Unknown 0x0CD5, # .. 0x0CD6 ; Kannada 0x0CD7, # .. 0x0CDC ; Unknown 0x0CDD, # .. 0x0CDE ; Kannada 0x0CDF, # .. 0x0CDF ; Unknown 0x0CE0, # .. 0x0CE3 ; Kannada 0x0CE4, # .. 0x0CE5 ; Unknown 0x0CE6, # .. 0x0CEF ; Kannada 0x0CF0, # .. 0x0CF0 ; Unknown 0x0CF1, # .. 0x0CF3 ; Kannada 0x0CF4, # .. 0x0CFF ; Unknown 0x0D00, # .. 0x0D0C ; Malayalam 0x0D0D, # .. 0x0D0D ; Unknown 0x0D0E, # .. 0x0D10 ; Malayalam 0x0D11, # .. 0x0D11 ; Unknown 0x0D12, # .. 0x0D44 ; Malayalam 0x0D45, # .. 0x0D45 ; Unknown 0x0D46, # .. 0x0D48 ; Malayalam 0x0D49, # .. 0x0D49 ; Unknown 0x0D4A, # .. 0x0D4F ; Malayalam 0x0D50, # .. 0x0D53 ; Unknown 0x0D54, # .. 0x0D63 ; Malayalam 0x0D64, # .. 0x0D65 ; Unknown 0x0D66, # .. 0x0D7F ; Malayalam 0x0D80, # .. 0x0D80 ; Unknown 0x0D81, # .. 0x0D83 ; Sinhala 0x0D84, # .. 0x0D84 ; Unknown 0x0D85, # .. 0x0D96 ; Sinhala 0x0D97, # .. 0x0D99 ; Unknown 0x0D9A, # .. 0x0DB1 ; Sinhala 0x0DB2, # .. 0x0DB2 ; Unknown 0x0DB3, # .. 0x0DBB ; Sinhala 0x0DBC, # .. 0x0DBC ; Unknown 0x0DBD, # .. 0x0DBD ; Sinhala 0x0DBE, # .. 0x0DBF ; Unknown 0x0DC0, # .. 0x0DC6 ; Sinhala 0x0DC7, # .. 0x0DC9 ; Unknown 0x0DCA, # .. 0x0DCA ; Sinhala 0x0DCB, # .. 0x0DCE ; Unknown 0x0DCF, # .. 0x0DD4 ; Sinhala 0x0DD5, # .. 0x0DD5 ; Unknown 0x0DD6, # .. 0x0DD6 ; Sinhala 0x0DD7, # .. 0x0DD7 ; Unknown 0x0DD8, # .. 0x0DDF ; Sinhala 0x0DE0, # .. 0x0DE5 ; Unknown 0x0DE6, # .. 0x0DEF ; Sinhala 0x0DF0, # .. 0x0DF1 ; Unknown 0x0DF2, # .. 
0x0DF4 ; Sinhala 0x0DF5, # .. 0x0E00 ; Unknown 0x0E01, # .. 0x0E3A ; Thai 0x0E3B, # .. 0x0E3E ; Unknown 0x0E3F, # .. 0x0E3F ; Common 0x0E40, # .. 0x0E5B ; Thai 0x0E5C, # .. 0x0E80 ; Unknown 0x0E81, # .. 0x0E82 ; Lao 0x0E83, # .. 0x0E83 ; Unknown 0x0E84, # .. 0x0E84 ; Lao 0x0E85, # .. 0x0E85 ; Unknown 0x0E86, # .. 0x0E8A ; Lao 0x0E8B, # .. 0x0E8B ; Unknown 0x0E8C, # .. 0x0EA3 ; Lao 0x0EA4, # .. 0x0EA4 ; Unknown 0x0EA5, # .. 0x0EA5 ; Lao 0x0EA6, # .. 0x0EA6 ; Unknown 0x0EA7, # .. 0x0EBD ; Lao 0x0EBE, # .. 0x0EBF ; Unknown 0x0EC0, # .. 0x0EC4 ; Lao 0x0EC5, # .. 0x0EC5 ; Unknown 0x0EC6, # .. 0x0EC6 ; Lao 0x0EC7, # .. 0x0EC7 ; Unknown 0x0EC8, # .. 0x0ECE ; Lao 0x0ECF, # .. 0x0ECF ; Unknown 0x0ED0, # .. 0x0ED9 ; Lao 0x0EDA, # .. 0x0EDB ; Unknown 0x0EDC, # .. 0x0EDF ; Lao 0x0EE0, # .. 0x0EFF ; Unknown 0x0F00, # .. 0x0F47 ; Tibetan 0x0F48, # .. 0x0F48 ; Unknown 0x0F49, # .. 0x0F6C ; Tibetan 0x0F6D, # .. 0x0F70 ; Unknown 0x0F71, # .. 0x0F97 ; Tibetan 0x0F98, # .. 0x0F98 ; Unknown 0x0F99, # .. 0x0FBC ; Tibetan 0x0FBD, # .. 0x0FBD ; Unknown 0x0FBE, # .. 0x0FCC ; Tibetan 0x0FCD, # .. 0x0FCD ; Unknown 0x0FCE, # .. 0x0FD4 ; Tibetan 0x0FD5, # .. 0x0FD8 ; Common 0x0FD9, # .. 0x0FDA ; Tibetan 0x0FDB, # .. 0x0FFF ; Unknown 0x1000, # .. 0x109F ; Myanmar 0x10A0, # .. 0x10C5 ; Georgian 0x10C6, # .. 0x10C6 ; Unknown 0x10C7, # .. 0x10C7 ; Georgian 0x10C8, # .. 0x10CC ; Unknown 0x10CD, # .. 0x10CD ; Georgian 0x10CE, # .. 0x10CF ; Unknown 0x10D0, # .. 0x10FA ; Georgian 0x10FB, # .. 0x10FB ; Common 0x10FC, # .. 0x10FF ; Georgian 0x1100, # .. 0x11FF ; Hangul 0x1200, # .. 0x1248 ; Ethiopic 0x1249, # .. 0x1249 ; Unknown 0x124A, # .. 0x124D ; Ethiopic 0x124E, # .. 0x124F ; Unknown 0x1250, # .. 0x1256 ; Ethiopic 0x1257, # .. 0x1257 ; Unknown 0x1258, # .. 0x1258 ; Ethiopic 0x1259, # .. 0x1259 ; Unknown 0x125A, # .. 0x125D ; Ethiopic 0x125E, # .. 0x125F ; Unknown 0x1260, # .. 0x1288 ; Ethiopic 0x1289, # .. 0x1289 ; Unknown 0x128A, # .. 0x128D ; Ethiopic 0x128E, # .. 0x128F ; Unknown 0x1290, # .. 0x12B0 ; Ethiopic 0x12B1, # .. 0x12B1 ; Unknown 0x12B2, # .. 0x12B5 ; Ethiopic 0x12B6, # .. 0x12B7 ; Unknown 0x12B8, # .. 0x12BE ; Ethiopic 0x12BF, # .. 0x12BF ; Unknown 0x12C0, # .. 0x12C0 ; Ethiopic 0x12C1, # .. 0x12C1 ; Unknown 0x12C2, # .. 0x12C5 ; Ethiopic 0x12C6, # .. 0x12C7 ; Unknown 0x12C8, # .. 0x12D6 ; Ethiopic 0x12D7, # .. 0x12D7 ; Unknown 0x12D8, # .. 0x1310 ; Ethiopic 0x1311, # .. 0x1311 ; Unknown 0x1312, # .. 0x1315 ; Ethiopic 0x1316, # .. 0x1317 ; Unknown 0x1318, # .. 0x135A ; Ethiopic 0x135B, # .. 0x135C ; Unknown 0x135D, # .. 0x137C ; Ethiopic 0x137D, # .. 0x137F ; Unknown 0x1380, # .. 0x1399 ; Ethiopic 0x139A, # .. 0x139F ; Unknown 0x13A0, # .. 0x13F5 ; Cherokee 0x13F6, # .. 0x13F7 ; Unknown 0x13F8, # .. 0x13FD ; Cherokee 0x13FE, # .. 0x13FF ; Unknown 0x1400, # .. 0x167F ; Canadian_Aboriginal 0x1680, # .. 0x169C ; Ogham 0x169D, # .. 0x169F ; Unknown 0x16A0, # .. 0x16EA ; Runic 0x16EB, # .. 0x16ED ; Common 0x16EE, # .. 0x16F8 ; Runic 0x16F9, # .. 0x16FF ; Unknown 0x1700, # .. 0x1715 ; Tagalog 0x1716, # .. 0x171E ; Unknown 0x171F, # .. 0x171F ; Tagalog 0x1720, # .. 0x1734 ; Hanunoo 0x1735, # .. 0x1736 ; Common 0x1737, # .. 0x173F ; Unknown 0x1740, # .. 0x1753 ; Buhid 0x1754, # .. 0x175F ; Unknown 0x1760, # .. 0x176C ; Tagbanwa 0x176D, # .. 0x176D ; Unknown 0x176E, # .. 0x1770 ; Tagbanwa 0x1771, # .. 0x1771 ; Unknown 0x1772, # .. 0x1773 ; Tagbanwa 0x1774, # .. 0x177F ; Unknown 0x1780, # .. 0x17DD ; Khmer 0x17DE, # .. 0x17DF ; Unknown 0x17E0, # .. 0x17E9 ; Khmer 0x17EA, # .. 0x17EF ; Unknown 0x17F0, # .. 
0x17F9 ; Khmer 0x17FA, # .. 0x17FF ; Unknown 0x1800, # .. 0x1801 ; Mongolian 0x1802, # .. 0x1803 ; Common 0x1804, # .. 0x1804 ; Mongolian 0x1805, # .. 0x1805 ; Common 0x1806, # .. 0x1819 ; Mongolian 0x181A, # .. 0x181F ; Unknown 0x1820, # .. 0x1878 ; Mongolian 0x1879, # .. 0x187F ; Unknown 0x1880, # .. 0x18AA ; Mongolian 0x18AB, # .. 0x18AF ; Unknown 0x18B0, # .. 0x18F5 ; Canadian_Aboriginal 0x18F6, # .. 0x18FF ; Unknown 0x1900, # .. 0x191E ; Limbu 0x191F, # .. 0x191F ; Unknown 0x1920, # .. 0x192B ; Limbu 0x192C, # .. 0x192F ; Unknown 0x1930, # .. 0x193B ; Limbu 0x193C, # .. 0x193F ; Unknown 0x1940, # .. 0x1940 ; Limbu 0x1941, # .. 0x1943 ; Unknown 0x1944, # .. 0x194F ; Limbu 0x1950, # .. 0x196D ; Tai_Le 0x196E, # .. 0x196F ; Unknown 0x1970, # .. 0x1974 ; Tai_Le 0x1975, # .. 0x197F ; Unknown 0x1980, # .. 0x19AB ; New_Tai_Lue 0x19AC, # .. 0x19AF ; Unknown 0x19B0, # .. 0x19C9 ; New_Tai_Lue 0x19CA, # .. 0x19CF ; Unknown 0x19D0, # .. 0x19DA ; New_Tai_Lue 0x19DB, # .. 0x19DD ; Unknown 0x19DE, # .. 0x19DF ; New_Tai_Lue 0x19E0, # .. 0x19FF ; Khmer 0x1A00, # .. 0x1A1B ; Buginese 0x1A1C, # .. 0x1A1D ; Unknown 0x1A1E, # .. 0x1A1F ; Buginese 0x1A20, # .. 0x1A5E ; Tai_Tham 0x1A5F, # .. 0x1A5F ; Unknown 0x1A60, # .. 0x1A7C ; Tai_Tham 0x1A7D, # .. 0x1A7E ; Unknown 0x1A7F, # .. 0x1A89 ; Tai_Tham 0x1A8A, # .. 0x1A8F ; Unknown 0x1A90, # .. 0x1A99 ; Tai_Tham 0x1A9A, # .. 0x1A9F ; Unknown 0x1AA0, # .. 0x1AAD ; Tai_Tham 0x1AAE, # .. 0x1AAF ; Unknown 0x1AB0, # .. 0x1ACE ; Inherited 0x1ACF, # .. 0x1AFF ; Unknown 0x1B00, # .. 0x1B4C ; Balinese 0x1B4D, # .. 0x1B4F ; Unknown 0x1B50, # .. 0x1B7E ; Balinese 0x1B7F, # .. 0x1B7F ; Unknown 0x1B80, # .. 0x1BBF ; Sundanese 0x1BC0, # .. 0x1BF3 ; Batak 0x1BF4, # .. 0x1BFB ; Unknown 0x1BFC, # .. 0x1BFF ; Batak 0x1C00, # .. 0x1C37 ; Lepcha 0x1C38, # .. 0x1C3A ; Unknown 0x1C3B, # .. 0x1C49 ; Lepcha 0x1C4A, # .. 0x1C4C ; Unknown 0x1C4D, # .. 0x1C4F ; Lepcha 0x1C50, # .. 0x1C7F ; Ol_Chiki 0x1C80, # .. 0x1C88 ; Cyrillic 0x1C89, # .. 0x1C8F ; Unknown 0x1C90, # .. 0x1CBA ; Georgian 0x1CBB, # .. 0x1CBC ; Unknown 0x1CBD, # .. 0x1CBF ; Georgian 0x1CC0, # .. 0x1CC7 ; Sundanese 0x1CC8, # .. 0x1CCF ; Unknown 0x1CD0, # .. 0x1CD2 ; Inherited 0x1CD3, # .. 0x1CD3 ; Common 0x1CD4, # .. 0x1CE0 ; Inherited 0x1CE1, # .. 0x1CE1 ; Common 0x1CE2, # .. 0x1CE8 ; Inherited 0x1CE9, # .. 0x1CEC ; Common 0x1CED, # .. 0x1CED ; Inherited 0x1CEE, # .. 0x1CF3 ; Common 0x1CF4, # .. 0x1CF4 ; Inherited 0x1CF5, # .. 0x1CF7 ; Common 0x1CF8, # .. 0x1CF9 ; Inherited 0x1CFA, # .. 0x1CFA ; Common 0x1CFB, # .. 0x1CFF ; Unknown 0x1D00, # .. 0x1D25 ; Latin 0x1D26, # .. 0x1D2A ; Greek 0x1D2B, # .. 0x1D2B ; Cyrillic 0x1D2C, # .. 0x1D5C ; Latin 0x1D5D, # .. 0x1D61 ; Greek 0x1D62, # .. 0x1D65 ; Latin 0x1D66, # .. 0x1D6A ; Greek 0x1D6B, # .. 0x1D77 ; Latin 0x1D78, # .. 0x1D78 ; Cyrillic 0x1D79, # .. 0x1DBE ; Latin 0x1DBF, # .. 0x1DBF ; Greek 0x1DC0, # .. 0x1DFF ; Inherited 0x1E00, # .. 0x1EFF ; Latin 0x1F00, # .. 0x1F15 ; Greek 0x1F16, # .. 0x1F17 ; Unknown 0x1F18, # .. 0x1F1D ; Greek 0x1F1E, # .. 0x1F1F ; Unknown 0x1F20, # .. 0x1F45 ; Greek 0x1F46, # .. 0x1F47 ; Unknown 0x1F48, # .. 0x1F4D ; Greek 0x1F4E, # .. 0x1F4F ; Unknown 0x1F50, # .. 0x1F57 ; Greek 0x1F58, # .. 0x1F58 ; Unknown 0x1F59, # .. 0x1F59 ; Greek 0x1F5A, # .. 0x1F5A ; Unknown 0x1F5B, # .. 0x1F5B ; Greek 0x1F5C, # .. 0x1F5C ; Unknown 0x1F5D, # .. 0x1F5D ; Greek 0x1F5E, # .. 0x1F5E ; Unknown 0x1F5F, # .. 0x1F7D ; Greek 0x1F7E, # .. 0x1F7F ; Unknown 0x1F80, # .. 0x1FB4 ; Greek 0x1FB5, # .. 0x1FB5 ; Unknown 0x1FB6, # .. 0x1FC4 ; Greek 0x1FC5, # .. 
0x1FC5 ; Unknown 0x1FC6, # .. 0x1FD3 ; Greek 0x1FD4, # .. 0x1FD5 ; Unknown 0x1FD6, # .. 0x1FDB ; Greek 0x1FDC, # .. 0x1FDC ; Unknown 0x1FDD, # .. 0x1FEF ; Greek 0x1FF0, # .. 0x1FF1 ; Unknown 0x1FF2, # .. 0x1FF4 ; Greek 0x1FF5, # .. 0x1FF5 ; Unknown 0x1FF6, # .. 0x1FFE ; Greek 0x1FFF, # .. 0x1FFF ; Unknown 0x2000, # .. 0x200B ; Common 0x200C, # .. 0x200D ; Inherited 0x200E, # .. 0x2064 ; Common 0x2065, # .. 0x2065 ; Unknown 0x2066, # .. 0x2070 ; Common 0x2071, # .. 0x2071 ; Latin 0x2072, # .. 0x2073 ; Unknown 0x2074, # .. 0x207E ; Common 0x207F, # .. 0x207F ; Latin 0x2080, # .. 0x208E ; Common 0x208F, # .. 0x208F ; Unknown 0x2090, # .. 0x209C ; Latin 0x209D, # .. 0x209F ; Unknown 0x20A0, # .. 0x20C0 ; Common 0x20C1, # .. 0x20CF ; Unknown 0x20D0, # .. 0x20F0 ; Inherited 0x20F1, # .. 0x20FF ; Unknown 0x2100, # .. 0x2125 ; Common 0x2126, # .. 0x2126 ; Greek 0x2127, # .. 0x2129 ; Common 0x212A, # .. 0x212B ; Latin 0x212C, # .. 0x2131 ; Common 0x2132, # .. 0x2132 ; Latin 0x2133, # .. 0x214D ; Common 0x214E, # .. 0x214E ; Latin 0x214F, # .. 0x215F ; Common 0x2160, # .. 0x2188 ; Latin 0x2189, # .. 0x218B ; Common 0x218C, # .. 0x218F ; Unknown 0x2190, # .. 0x2426 ; Common 0x2427, # .. 0x243F ; Unknown 0x2440, # .. 0x244A ; Common 0x244B, # .. 0x245F ; Unknown 0x2460, # .. 0x27FF ; Common 0x2800, # .. 0x28FF ; Braille 0x2900, # .. 0x2B73 ; Common 0x2B74, # .. 0x2B75 ; Unknown 0x2B76, # .. 0x2B95 ; Common 0x2B96, # .. 0x2B96 ; Unknown 0x2B97, # .. 0x2BFF ; Common 0x2C00, # .. 0x2C5F ; Glagolitic 0x2C60, # .. 0x2C7F ; Latin 0x2C80, # .. 0x2CF3 ; Coptic 0x2CF4, # .. 0x2CF8 ; Unknown 0x2CF9, # .. 0x2CFF ; Coptic 0x2D00, # .. 0x2D25 ; Georgian 0x2D26, # .. 0x2D26 ; Unknown 0x2D27, # .. 0x2D27 ; Georgian 0x2D28, # .. 0x2D2C ; Unknown 0x2D2D, # .. 0x2D2D ; Georgian 0x2D2E, # .. 0x2D2F ; Unknown 0x2D30, # .. 0x2D67 ; Tifinagh 0x2D68, # .. 0x2D6E ; Unknown 0x2D6F, # .. 0x2D70 ; Tifinagh 0x2D71, # .. 0x2D7E ; Unknown 0x2D7F, # .. 0x2D7F ; Tifinagh 0x2D80, # .. 0x2D96 ; Ethiopic 0x2D97, # .. 0x2D9F ; Unknown 0x2DA0, # .. 0x2DA6 ; Ethiopic 0x2DA7, # .. 0x2DA7 ; Unknown 0x2DA8, # .. 0x2DAE ; Ethiopic 0x2DAF, # .. 0x2DAF ; Unknown 0x2DB0, # .. 0x2DB6 ; Ethiopic 0x2DB7, # .. 0x2DB7 ; Unknown 0x2DB8, # .. 0x2DBE ; Ethiopic 0x2DBF, # .. 0x2DBF ; Unknown 0x2DC0, # .. 0x2DC6 ; Ethiopic 0x2DC7, # .. 0x2DC7 ; Unknown 0x2DC8, # .. 0x2DCE ; Ethiopic 0x2DCF, # .. 0x2DCF ; Unknown 0x2DD0, # .. 0x2DD6 ; Ethiopic 0x2DD7, # .. 0x2DD7 ; Unknown 0x2DD8, # .. 0x2DDE ; Ethiopic 0x2DDF, # .. 0x2DDF ; Unknown 0x2DE0, # .. 0x2DFF ; Cyrillic 0x2E00, # .. 0x2E5D ; Common 0x2E5E, # .. 0x2E7F ; Unknown 0x2E80, # .. 0x2E99 ; Han 0x2E9A, # .. 0x2E9A ; Unknown 0x2E9B, # .. 0x2EF3 ; Han 0x2EF4, # .. 0x2EFF ; Unknown 0x2F00, # .. 0x2FD5 ; Han 0x2FD6, # .. 0x2FEF ; Unknown 0x2FF0, # .. 0x2FFB ; Common 0x2FFC, # .. 0x2FFF ; Unknown 0x3000, # .. 0x3004 ; Common 0x3005, # .. 0x3005 ; Han 0x3006, # .. 0x3006 ; Common 0x3007, # .. 0x3007 ; Han 0x3008, # .. 0x3020 ; Common 0x3021, # .. 0x3029 ; Han 0x302A, # .. 0x302D ; Inherited 0x302E, # .. 0x302F ; Hangul 0x3030, # .. 0x3037 ; Common 0x3038, # .. 0x303B ; Han 0x303C, # .. 0x303F ; Common 0x3040, # .. 0x3040 ; Unknown 0x3041, # .. 0x3096 ; Hiragana 0x3097, # .. 0x3098 ; Unknown 0x3099, # .. 0x309A ; Inherited 0x309B, # .. 0x309C ; Common 0x309D, # .. 0x309F ; Hiragana 0x30A0, # .. 0x30A0 ; Common 0x30A1, # .. 0x30FA ; Katakana 0x30FB, # .. 0x30FC ; Common 0x30FD, # .. 0x30FF ; Katakana 0x3100, # .. 0x3104 ; Unknown 0x3105, # .. 0x312F ; Bopomofo 0x3130, # .. 0x3130 ; Unknown 0x3131, # .. 
0x318E ; Hangul 0x318F, # .. 0x318F ; Unknown 0x3190, # .. 0x319F ; Common 0x31A0, # .. 0x31BF ; Bopomofo 0x31C0, # .. 0x31E3 ; Common 0x31E4, # .. 0x31EF ; Unknown 0x31F0, # .. 0x31FF ; Katakana 0x3200, # .. 0x321E ; Hangul 0x321F, # .. 0x321F ; Unknown 0x3220, # .. 0x325F ; Common 0x3260, # .. 0x327E ; Hangul 0x327F, # .. 0x32CF ; Common 0x32D0, # .. 0x32FE ; Katakana 0x32FF, # .. 0x32FF ; Common 0x3300, # .. 0x3357 ; Katakana 0x3358, # .. 0x33FF ; Common 0x3400, # .. 0x4DBF ; Han 0x4DC0, # .. 0x4DFF ; Common 0x4E00, # .. 0x9FFF ; Han 0xA000, # .. 0xA48C ; Yi 0xA48D, # .. 0xA48F ; Unknown 0xA490, # .. 0xA4C6 ; Yi 0xA4C7, # .. 0xA4CF ; Unknown 0xA4D0, # .. 0xA4FF ; Lisu 0xA500, # .. 0xA62B ; Vai 0xA62C, # .. 0xA63F ; Unknown 0xA640, # .. 0xA69F ; Cyrillic 0xA6A0, # .. 0xA6F7 ; Bamum 0xA6F8, # .. 0xA6FF ; Unknown 0xA700, # .. 0xA721 ; Common 0xA722, # .. 0xA787 ; Latin 0xA788, # .. 0xA78A ; Common 0xA78B, # .. 0xA7CA ; Latin 0xA7CB, # .. 0xA7CF ; Unknown 0xA7D0, # .. 0xA7D1 ; Latin 0xA7D2, # .. 0xA7D2 ; Unknown 0xA7D3, # .. 0xA7D3 ; Latin 0xA7D4, # .. 0xA7D4 ; Unknown 0xA7D5, # .. 0xA7D9 ; Latin 0xA7DA, # .. 0xA7F1 ; Unknown 0xA7F2, # .. 0xA7FF ; Latin 0xA800, # .. 0xA82C ; Syloti_Nagri 0xA82D, # .. 0xA82F ; Unknown 0xA830, # .. 0xA839 ; Common 0xA83A, # .. 0xA83F ; Unknown 0xA840, # .. 0xA877 ; Phags_Pa 0xA878, # .. 0xA87F ; Unknown 0xA880, # .. 0xA8C5 ; Saurashtra 0xA8C6, # .. 0xA8CD ; Unknown 0xA8CE, # .. 0xA8D9 ; Saurashtra 0xA8DA, # .. 0xA8DF ; Unknown 0xA8E0, # .. 0xA8FF ; Devanagari 0xA900, # .. 0xA92D ; Kayah_Li 0xA92E, # .. 0xA92E ; Common 0xA92F, # .. 0xA92F ; Kayah_Li 0xA930, # .. 0xA953 ; Rejang 0xA954, # .. 0xA95E ; Unknown 0xA95F, # .. 0xA95F ; Rejang 0xA960, # .. 0xA97C ; Hangul 0xA97D, # .. 0xA97F ; Unknown 0xA980, # .. 0xA9CD ; Javanese 0xA9CE, # .. 0xA9CE ; Unknown 0xA9CF, # .. 0xA9CF ; Common 0xA9D0, # .. 0xA9D9 ; Javanese 0xA9DA, # .. 0xA9DD ; Unknown 0xA9DE, # .. 0xA9DF ; Javanese 0xA9E0, # .. 0xA9FE ; Myanmar 0xA9FF, # .. 0xA9FF ; Unknown 0xAA00, # .. 0xAA36 ; Cham 0xAA37, # .. 0xAA3F ; Unknown 0xAA40, # .. 0xAA4D ; Cham 0xAA4E, # .. 0xAA4F ; Unknown 0xAA50, # .. 0xAA59 ; Cham 0xAA5A, # .. 0xAA5B ; Unknown 0xAA5C, # .. 0xAA5F ; Cham 0xAA60, # .. 0xAA7F ; Myanmar 0xAA80, # .. 0xAAC2 ; Tai_Viet 0xAAC3, # .. 0xAADA ; Unknown 0xAADB, # .. 0xAADF ; Tai_Viet 0xAAE0, # .. 0xAAF6 ; Meetei_Mayek 0xAAF7, # .. 0xAB00 ; Unknown 0xAB01, # .. 0xAB06 ; Ethiopic 0xAB07, # .. 0xAB08 ; Unknown 0xAB09, # .. 0xAB0E ; Ethiopic 0xAB0F, # .. 0xAB10 ; Unknown 0xAB11, # .. 0xAB16 ; Ethiopic 0xAB17, # .. 0xAB1F ; Unknown 0xAB20, # .. 0xAB26 ; Ethiopic 0xAB27, # .. 0xAB27 ; Unknown 0xAB28, # .. 0xAB2E ; Ethiopic 0xAB2F, # .. 0xAB2F ; Unknown 0xAB30, # .. 0xAB5A ; Latin 0xAB5B, # .. 0xAB5B ; Common 0xAB5C, # .. 0xAB64 ; Latin 0xAB65, # .. 0xAB65 ; Greek 0xAB66, # .. 0xAB69 ; Latin 0xAB6A, # .. 0xAB6B ; Common 0xAB6C, # .. 0xAB6F ; Unknown 0xAB70, # .. 0xABBF ; Cherokee 0xABC0, # .. 0xABED ; Meetei_Mayek 0xABEE, # .. 0xABEF ; Unknown 0xABF0, # .. 0xABF9 ; Meetei_Mayek 0xABFA, # .. 0xABFF ; Unknown 0xAC00, # .. 0xD7A3 ; Hangul 0xD7A4, # .. 0xD7AF ; Unknown 0xD7B0, # .. 0xD7C6 ; Hangul 0xD7C7, # .. 0xD7CA ; Unknown 0xD7CB, # .. 0xD7FB ; Hangul 0xD7FC, # .. 0xF8FF ; Unknown 0xF900, # .. 0xFA6D ; Han 0xFA6E, # .. 0xFA6F ; Unknown 0xFA70, # .. 0xFAD9 ; Han 0xFADA, # .. 0xFAFF ; Unknown 0xFB00, # .. 0xFB06 ; Latin 0xFB07, # .. 0xFB12 ; Unknown 0xFB13, # .. 0xFB17 ; Armenian 0xFB18, # .. 0xFB1C ; Unknown 0xFB1D, # .. 0xFB36 ; Hebrew 0xFB37, # .. 0xFB37 ; Unknown 0xFB38, # .. 0xFB3C ; Hebrew 0xFB3D, # .. 
0xFB3D ; Unknown 0xFB3E, # .. 0xFB3E ; Hebrew 0xFB3F, # .. 0xFB3F ; Unknown 0xFB40, # .. 0xFB41 ; Hebrew 0xFB42, # .. 0xFB42 ; Unknown 0xFB43, # .. 0xFB44 ; Hebrew 0xFB45, # .. 0xFB45 ; Unknown 0xFB46, # .. 0xFB4F ; Hebrew 0xFB50, # .. 0xFBC2 ; Arabic 0xFBC3, # .. 0xFBD2 ; Unknown 0xFBD3, # .. 0xFD3D ; Arabic 0xFD3E, # .. 0xFD3F ; Common 0xFD40, # .. 0xFD8F ; Arabic 0xFD90, # .. 0xFD91 ; Unknown 0xFD92, # .. 0xFDC7 ; Arabic 0xFDC8, # .. 0xFDCE ; Unknown 0xFDCF, # .. 0xFDCF ; Arabic 0xFDD0, # .. 0xFDEF ; Unknown 0xFDF0, # .. 0xFDFF ; Arabic 0xFE00, # .. 0xFE0F ; Inherited 0xFE10, # .. 0xFE19 ; Common 0xFE1A, # .. 0xFE1F ; Unknown 0xFE20, # .. 0xFE2D ; Inherited 0xFE2E, # .. 0xFE2F ; Cyrillic 0xFE30, # .. 0xFE52 ; Common 0xFE53, # .. 0xFE53 ; Unknown 0xFE54, # .. 0xFE66 ; Common 0xFE67, # .. 0xFE67 ; Unknown 0xFE68, # .. 0xFE6B ; Common 0xFE6C, # .. 0xFE6F ; Unknown 0xFE70, # .. 0xFE74 ; Arabic 0xFE75, # .. 0xFE75 ; Unknown 0xFE76, # .. 0xFEFC ; Arabic 0xFEFD, # .. 0xFEFE ; Unknown 0xFEFF, # .. 0xFEFF ; Common 0xFF00, # .. 0xFF00 ; Unknown 0xFF01, # .. 0xFF20 ; Common 0xFF21, # .. 0xFF3A ; Latin 0xFF3B, # .. 0xFF40 ; Common 0xFF41, # .. 0xFF5A ; Latin 0xFF5B, # .. 0xFF65 ; Common 0xFF66, # .. 0xFF6F ; Katakana 0xFF70, # .. 0xFF70 ; Common 0xFF71, # .. 0xFF9D ; Katakana 0xFF9E, # .. 0xFF9F ; Common 0xFFA0, # .. 0xFFBE ; Hangul 0xFFBF, # .. 0xFFC1 ; Unknown 0xFFC2, # .. 0xFFC7 ; Hangul 0xFFC8, # .. 0xFFC9 ; Unknown 0xFFCA, # .. 0xFFCF ; Hangul 0xFFD0, # .. 0xFFD1 ; Unknown 0xFFD2, # .. 0xFFD7 ; Hangul 0xFFD8, # .. 0xFFD9 ; Unknown 0xFFDA, # .. 0xFFDC ; Hangul 0xFFDD, # .. 0xFFDF ; Unknown 0xFFE0, # .. 0xFFE6 ; Common 0xFFE7, # .. 0xFFE7 ; Unknown 0xFFE8, # .. 0xFFEE ; Common 0xFFEF, # .. 0xFFF8 ; Unknown 0xFFF9, # .. 0xFFFD ; Common 0xFFFE, # .. 0xFFFF ; Unknown 0x10000, # .. 0x1000B ; Linear_B 0x1000C, # .. 0x1000C ; Unknown 0x1000D, # .. 0x10026 ; Linear_B 0x10027, # .. 0x10027 ; Unknown 0x10028, # .. 0x1003A ; Linear_B 0x1003B, # .. 0x1003B ; Unknown 0x1003C, # .. 0x1003D ; Linear_B 0x1003E, # .. 0x1003E ; Unknown 0x1003F, # .. 0x1004D ; Linear_B 0x1004E, # .. 0x1004F ; Unknown 0x10050, # .. 0x1005D ; Linear_B 0x1005E, # .. 0x1007F ; Unknown 0x10080, # .. 0x100FA ; Linear_B 0x100FB, # .. 0x100FF ; Unknown 0x10100, # .. 0x10102 ; Common 0x10103, # .. 0x10106 ; Unknown 0x10107, # .. 0x10133 ; Common 0x10134, # .. 0x10136 ; Unknown 0x10137, # .. 0x1013F ; Common 0x10140, # .. 0x1018E ; Greek 0x1018F, # .. 0x1018F ; Unknown 0x10190, # .. 0x1019C ; Common 0x1019D, # .. 0x1019F ; Unknown 0x101A0, # .. 0x101A0 ; Greek 0x101A1, # .. 0x101CF ; Unknown 0x101D0, # .. 0x101FC ; Common 0x101FD, # .. 0x101FD ; Inherited 0x101FE, # .. 0x1027F ; Unknown 0x10280, # .. 0x1029C ; Lycian 0x1029D, # .. 0x1029F ; Unknown 0x102A0, # .. 0x102D0 ; Carian 0x102D1, # .. 0x102DF ; Unknown 0x102E0, # .. 0x102E0 ; Inherited 0x102E1, # .. 0x102FB ; Common 0x102FC, # .. 0x102FF ; Unknown 0x10300, # .. 0x10323 ; Old_Italic 0x10324, # .. 0x1032C ; Unknown 0x1032D, # .. 0x1032F ; Old_Italic 0x10330, # .. 0x1034A ; Gothic 0x1034B, # .. 0x1034F ; Unknown 0x10350, # .. 0x1037A ; Old_Permic 0x1037B, # .. 0x1037F ; Unknown 0x10380, # .. 0x1039D ; Ugaritic 0x1039E, # .. 0x1039E ; Unknown 0x1039F, # .. 0x1039F ; Ugaritic 0x103A0, # .. 0x103C3 ; Old_Persian 0x103C4, # .. 0x103C7 ; Unknown 0x103C8, # .. 0x103D5 ; Old_Persian 0x103D6, # .. 0x103FF ; Unknown 0x10400, # .. 0x1044F ; Deseret 0x10450, # .. 0x1047F ; Shavian 0x10480, # .. 0x1049D ; Osmanya 0x1049E, # .. 0x1049F ; Unknown 0x104A0, # .. 0x104A9 ; Osmanya 0x104AA, # .. 
0x104AF ; Unknown 0x104B0, # .. 0x104D3 ; Osage 0x104D4, # .. 0x104D7 ; Unknown 0x104D8, # .. 0x104FB ; Osage 0x104FC, # .. 0x104FF ; Unknown 0x10500, # .. 0x10527 ; Elbasan 0x10528, # .. 0x1052F ; Unknown 0x10530, # .. 0x10563 ; Caucasian_Albanian 0x10564, # .. 0x1056E ; Unknown 0x1056F, # .. 0x1056F ; Caucasian_Albanian 0x10570, # .. 0x1057A ; Vithkuqi 0x1057B, # .. 0x1057B ; Unknown 0x1057C, # .. 0x1058A ; Vithkuqi 0x1058B, # .. 0x1058B ; Unknown 0x1058C, # .. 0x10592 ; Vithkuqi 0x10593, # .. 0x10593 ; Unknown 0x10594, # .. 0x10595 ; Vithkuqi 0x10596, # .. 0x10596 ; Unknown 0x10597, # .. 0x105A1 ; Vithkuqi 0x105A2, # .. 0x105A2 ; Unknown 0x105A3, # .. 0x105B1 ; Vithkuqi 0x105B2, # .. 0x105B2 ; Unknown 0x105B3, # .. 0x105B9 ; Vithkuqi 0x105BA, # .. 0x105BA ; Unknown 0x105BB, # .. 0x105BC ; Vithkuqi 0x105BD, # .. 0x105FF ; Unknown 0x10600, # .. 0x10736 ; Linear_A 0x10737, # .. 0x1073F ; Unknown 0x10740, # .. 0x10755 ; Linear_A 0x10756, # .. 0x1075F ; Unknown 0x10760, # .. 0x10767 ; Linear_A 0x10768, # .. 0x1077F ; Unknown 0x10780, # .. 0x10785 ; Latin 0x10786, # .. 0x10786 ; Unknown 0x10787, # .. 0x107B0 ; Latin 0x107B1, # .. 0x107B1 ; Unknown 0x107B2, # .. 0x107BA ; Latin 0x107BB, # .. 0x107FF ; Unknown 0x10800, # .. 0x10805 ; Cypriot 0x10806, # .. 0x10807 ; Unknown 0x10808, # .. 0x10808 ; Cypriot 0x10809, # .. 0x10809 ; Unknown 0x1080A, # .. 0x10835 ; Cypriot 0x10836, # .. 0x10836 ; Unknown 0x10837, # .. 0x10838 ; Cypriot 0x10839, # .. 0x1083B ; Unknown 0x1083C, # .. 0x1083C ; Cypriot 0x1083D, # .. 0x1083E ; Unknown 0x1083F, # .. 0x1083F ; Cypriot 0x10840, # .. 0x10855 ; Imperial_Aramaic 0x10856, # .. 0x10856 ; Unknown 0x10857, # .. 0x1085F ; Imperial_Aramaic 0x10860, # .. 0x1087F ; Palmyrene 0x10880, # .. 0x1089E ; Nabataean 0x1089F, # .. 0x108A6 ; Unknown 0x108A7, # .. 0x108AF ; Nabataean 0x108B0, # .. 0x108DF ; Unknown 0x108E0, # .. 0x108F2 ; Hatran 0x108F3, # .. 0x108F3 ; Unknown 0x108F4, # .. 0x108F5 ; Hatran 0x108F6, # .. 0x108FA ; Unknown 0x108FB, # .. 0x108FF ; Hatran 0x10900, # .. 0x1091B ; Phoenician 0x1091C, # .. 0x1091E ; Unknown 0x1091F, # .. 0x1091F ; Phoenician 0x10920, # .. 0x10939 ; Lydian 0x1093A, # .. 0x1093E ; Unknown 0x1093F, # .. 0x1093F ; Lydian 0x10940, # .. 0x1097F ; Unknown 0x10980, # .. 0x1099F ; Meroitic_Hieroglyphs 0x109A0, # .. 0x109B7 ; Meroitic_Cursive 0x109B8, # .. 0x109BB ; Unknown 0x109BC, # .. 0x109CF ; Meroitic_Cursive 0x109D0, # .. 0x109D1 ; Unknown 0x109D2, # .. 0x109FF ; Meroitic_Cursive 0x10A00, # .. 0x10A03 ; Kharoshthi 0x10A04, # .. 0x10A04 ; Unknown 0x10A05, # .. 0x10A06 ; Kharoshthi 0x10A07, # .. 0x10A0B ; Unknown 0x10A0C, # .. 0x10A13 ; Kharoshthi 0x10A14, # .. 0x10A14 ; Unknown 0x10A15, # .. 0x10A17 ; Kharoshthi 0x10A18, # .. 0x10A18 ; Unknown 0x10A19, # .. 0x10A35 ; Kharoshthi 0x10A36, # .. 0x10A37 ; Unknown 0x10A38, # .. 0x10A3A ; Kharoshthi 0x10A3B, # .. 0x10A3E ; Unknown 0x10A3F, # .. 0x10A48 ; Kharoshthi 0x10A49, # .. 0x10A4F ; Unknown 0x10A50, # .. 0x10A58 ; Kharoshthi 0x10A59, # .. 0x10A5F ; Unknown 0x10A60, # .. 0x10A7F ; Old_South_Arabian 0x10A80, # .. 0x10A9F ; Old_North_Arabian 0x10AA0, # .. 0x10ABF ; Unknown 0x10AC0, # .. 0x10AE6 ; Manichaean 0x10AE7, # .. 0x10AEA ; Unknown 0x10AEB, # .. 0x10AF6 ; Manichaean 0x10AF7, # .. 0x10AFF ; Unknown 0x10B00, # .. 0x10B35 ; Avestan 0x10B36, # .. 0x10B38 ; Unknown 0x10B39, # .. 0x10B3F ; Avestan 0x10B40, # .. 0x10B55 ; Inscriptional_Parthian 0x10B56, # .. 0x10B57 ; Unknown 0x10B58, # .. 0x10B5F ; Inscriptional_Parthian 0x10B60, # .. 0x10B72 ; Inscriptional_Pahlavi 0x10B73, # .. 
0x10B77 ; Unknown 0x10B78, # .. 0x10B7F ; Inscriptional_Pahlavi 0x10B80, # .. 0x10B91 ; Psalter_Pahlavi 0x10B92, # .. 0x10B98 ; Unknown 0x10B99, # .. 0x10B9C ; Psalter_Pahlavi 0x10B9D, # .. 0x10BA8 ; Unknown 0x10BA9, # .. 0x10BAF ; Psalter_Pahlavi 0x10BB0, # .. 0x10BFF ; Unknown 0x10C00, # .. 0x10C48 ; Old_Turkic 0x10C49, # .. 0x10C7F ; Unknown 0x10C80, # .. 0x10CB2 ; Old_Hungarian 0x10CB3, # .. 0x10CBF ; Unknown 0x10CC0, # .. 0x10CF2 ; Old_Hungarian 0x10CF3, # .. 0x10CF9 ; Unknown 0x10CFA, # .. 0x10CFF ; Old_Hungarian 0x10D00, # .. 0x10D27 ; Hanifi_Rohingya 0x10D28, # .. 0x10D2F ; Unknown 0x10D30, # .. 0x10D39 ; Hanifi_Rohingya 0x10D3A, # .. 0x10E5F ; Unknown 0x10E60, # .. 0x10E7E ; Arabic 0x10E7F, # .. 0x10E7F ; Unknown 0x10E80, # .. 0x10EA9 ; Yezidi 0x10EAA, # .. 0x10EAA ; Unknown 0x10EAB, # .. 0x10EAD ; Yezidi 0x10EAE, # .. 0x10EAF ; Unknown 0x10EB0, # .. 0x10EB1 ; Yezidi 0x10EB2, # .. 0x10EFC ; Unknown 0x10EFD, # .. 0x10EFF ; Arabic 0x10F00, # .. 0x10F27 ; Old_Sogdian 0x10F28, # .. 0x10F2F ; Unknown 0x10F30, # .. 0x10F59 ; Sogdian 0x10F5A, # .. 0x10F6F ; Unknown 0x10F70, # .. 0x10F89 ; Old_Uyghur 0x10F8A, # .. 0x10FAF ; Unknown 0x10FB0, # .. 0x10FCB ; Chorasmian 0x10FCC, # .. 0x10FDF ; Unknown 0x10FE0, # .. 0x10FF6 ; Elymaic 0x10FF7, # .. 0x10FFF ; Unknown 0x11000, # .. 0x1104D ; Brahmi 0x1104E, # .. 0x11051 ; Unknown 0x11052, # .. 0x11075 ; Brahmi 0x11076, # .. 0x1107E ; Unknown 0x1107F, # .. 0x1107F ; Brahmi 0x11080, # .. 0x110C2 ; Kaithi 0x110C3, # .. 0x110CC ; Unknown 0x110CD, # .. 0x110CD ; Kaithi 0x110CE, # .. 0x110CF ; Unknown 0x110D0, # .. 0x110E8 ; Sora_Sompeng 0x110E9, # .. 0x110EF ; Unknown 0x110F0, # .. 0x110F9 ; Sora_Sompeng 0x110FA, # .. 0x110FF ; Unknown 0x11100, # .. 0x11134 ; Chakma 0x11135, # .. 0x11135 ; Unknown 0x11136, # .. 0x11147 ; Chakma 0x11148, # .. 0x1114F ; Unknown 0x11150, # .. 0x11176 ; Mahajani 0x11177, # .. 0x1117F ; Unknown 0x11180, # .. 0x111DF ; Sharada 0x111E0, # .. 0x111E0 ; Unknown 0x111E1, # .. 0x111F4 ; Sinhala 0x111F5, # .. 0x111FF ; Unknown 0x11200, # .. 0x11211 ; Khojki 0x11212, # .. 0x11212 ; Unknown 0x11213, # .. 0x11241 ; Khojki 0x11242, # .. 0x1127F ; Unknown 0x11280, # .. 0x11286 ; Multani 0x11287, # .. 0x11287 ; Unknown 0x11288, # .. 0x11288 ; Multani 0x11289, # .. 0x11289 ; Unknown 0x1128A, # .. 0x1128D ; Multani 0x1128E, # .. 0x1128E ; Unknown 0x1128F, # .. 0x1129D ; Multani 0x1129E, # .. 0x1129E ; Unknown 0x1129F, # .. 0x112A9 ; Multani 0x112AA, # .. 0x112AF ; Unknown 0x112B0, # .. 0x112EA ; Khudawadi 0x112EB, # .. 0x112EF ; Unknown 0x112F0, # .. 0x112F9 ; Khudawadi 0x112FA, # .. 0x112FF ; Unknown 0x11300, # .. 0x11303 ; Grantha 0x11304, # .. 0x11304 ; Unknown 0x11305, # .. 0x1130C ; Grantha 0x1130D, # .. 0x1130E ; Unknown 0x1130F, # .. 0x11310 ; Grantha 0x11311, # .. 0x11312 ; Unknown 0x11313, # .. 0x11328 ; Grantha 0x11329, # .. 0x11329 ; Unknown 0x1132A, # .. 0x11330 ; Grantha 0x11331, # .. 0x11331 ; Unknown 0x11332, # .. 0x11333 ; Grantha 0x11334, # .. 0x11334 ; Unknown 0x11335, # .. 0x11339 ; Grantha 0x1133A, # .. 0x1133A ; Unknown 0x1133B, # .. 0x1133B ; Inherited 0x1133C, # .. 0x11344 ; Grantha 0x11345, # .. 0x11346 ; Unknown 0x11347, # .. 0x11348 ; Grantha 0x11349, # .. 0x1134A ; Unknown 0x1134B, # .. 0x1134D ; Grantha 0x1134E, # .. 0x1134F ; Unknown 0x11350, # .. 0x11350 ; Grantha 0x11351, # .. 0x11356 ; Unknown 0x11357, # .. 0x11357 ; Grantha 0x11358, # .. 0x1135C ; Unknown 0x1135D, # .. 0x11363 ; Grantha 0x11364, # .. 0x11365 ; Unknown 0x11366, # .. 0x1136C ; Grantha 0x1136D, # .. 0x1136F ; Unknown 0x11370, # .. 
0x11374 ; Grantha 0x11375, # .. 0x113FF ; Unknown 0x11400, # .. 0x1145B ; Newa 0x1145C, # .. 0x1145C ; Unknown 0x1145D, # .. 0x11461 ; Newa 0x11462, # .. 0x1147F ; Unknown 0x11480, # .. 0x114C7 ; Tirhuta 0x114C8, # .. 0x114CF ; Unknown 0x114D0, # .. 0x114D9 ; Tirhuta 0x114DA, # .. 0x1157F ; Unknown 0x11580, # .. 0x115B5 ; Siddham 0x115B6, # .. 0x115B7 ; Unknown 0x115B8, # .. 0x115DD ; Siddham 0x115DE, # .. 0x115FF ; Unknown 0x11600, # .. 0x11644 ; Modi 0x11645, # .. 0x1164F ; Unknown 0x11650, # .. 0x11659 ; Modi 0x1165A, # .. 0x1165F ; Unknown 0x11660, # .. 0x1166C ; Mongolian 0x1166D, # .. 0x1167F ; Unknown 0x11680, # .. 0x116B9 ; Takri 0x116BA, # .. 0x116BF ; Unknown 0x116C0, # .. 0x116C9 ; Takri 0x116CA, # .. 0x116FF ; Unknown 0x11700, # .. 0x1171A ; Ahom 0x1171B, # .. 0x1171C ; Unknown 0x1171D, # .. 0x1172B ; Ahom 0x1172C, # .. 0x1172F ; Unknown 0x11730, # .. 0x11746 ; Ahom 0x11747, # .. 0x117FF ; Unknown 0x11800, # .. 0x1183B ; Dogra 0x1183C, # .. 0x1189F ; Unknown 0x118A0, # .. 0x118F2 ; Warang_Citi 0x118F3, # .. 0x118FE ; Unknown 0x118FF, # .. 0x118FF ; Warang_Citi 0x11900, # .. 0x11906 ; Dives_Akuru 0x11907, # .. 0x11908 ; Unknown 0x11909, # .. 0x11909 ; Dives_Akuru 0x1190A, # .. 0x1190B ; Unknown 0x1190C, # .. 0x11913 ; Dives_Akuru 0x11914, # .. 0x11914 ; Unknown 0x11915, # .. 0x11916 ; Dives_Akuru 0x11917, # .. 0x11917 ; Unknown 0x11918, # .. 0x11935 ; Dives_Akuru 0x11936, # .. 0x11936 ; Unknown 0x11937, # .. 0x11938 ; Dives_Akuru 0x11939, # .. 0x1193A ; Unknown 0x1193B, # .. 0x11946 ; Dives_Akuru 0x11947, # .. 0x1194F ; Unknown 0x11950, # .. 0x11959 ; Dives_Akuru 0x1195A, # .. 0x1199F ; Unknown 0x119A0, # .. 0x119A7 ; Nandinagari 0x119A8, # .. 0x119A9 ; Unknown 0x119AA, # .. 0x119D7 ; Nandinagari 0x119D8, # .. 0x119D9 ; Unknown 0x119DA, # .. 0x119E4 ; Nandinagari 0x119E5, # .. 0x119FF ; Unknown 0x11A00, # .. 0x11A47 ; Zanabazar_Square 0x11A48, # .. 0x11A4F ; Unknown 0x11A50, # .. 0x11AA2 ; Soyombo 0x11AA3, # .. 0x11AAF ; Unknown 0x11AB0, # .. 0x11ABF ; Canadian_Aboriginal 0x11AC0, # .. 0x11AF8 ; Pau_Cin_Hau 0x11AF9, # .. 0x11AFF ; Unknown 0x11B00, # .. 0x11B09 ; Devanagari 0x11B0A, # .. 0x11BFF ; Unknown 0x11C00, # .. 0x11C08 ; Bhaiksuki 0x11C09, # .. 0x11C09 ; Unknown 0x11C0A, # .. 0x11C36 ; Bhaiksuki 0x11C37, # .. 0x11C37 ; Unknown 0x11C38, # .. 0x11C45 ; Bhaiksuki 0x11C46, # .. 0x11C4F ; Unknown 0x11C50, # .. 0x11C6C ; Bhaiksuki 0x11C6D, # .. 0x11C6F ; Unknown 0x11C70, # .. 0x11C8F ; Marchen 0x11C90, # .. 0x11C91 ; Unknown 0x11C92, # .. 0x11CA7 ; Marchen 0x11CA8, # .. 0x11CA8 ; Unknown 0x11CA9, # .. 0x11CB6 ; Marchen 0x11CB7, # .. 0x11CFF ; Unknown 0x11D00, # .. 0x11D06 ; Masaram_Gondi 0x11D07, # .. 0x11D07 ; Unknown 0x11D08, # .. 0x11D09 ; Masaram_Gondi 0x11D0A, # .. 0x11D0A ; Unknown 0x11D0B, # .. 0x11D36 ; Masaram_Gondi 0x11D37, # .. 0x11D39 ; Unknown 0x11D3A, # .. 0x11D3A ; Masaram_Gondi 0x11D3B, # .. 0x11D3B ; Unknown 0x11D3C, # .. 0x11D3D ; Masaram_Gondi 0x11D3E, # .. 0x11D3E ; Unknown 0x11D3F, # .. 0x11D47 ; Masaram_Gondi 0x11D48, # .. 0x11D4F ; Unknown 0x11D50, # .. 0x11D59 ; Masaram_Gondi 0x11D5A, # .. 0x11D5F ; Unknown 0x11D60, # .. 0x11D65 ; Gunjala_Gondi 0x11D66, # .. 0x11D66 ; Unknown 0x11D67, # .. 0x11D68 ; Gunjala_Gondi 0x11D69, # .. 0x11D69 ; Unknown 0x11D6A, # .. 0x11D8E ; Gunjala_Gondi 0x11D8F, # .. 0x11D8F ; Unknown 0x11D90, # .. 0x11D91 ; Gunjala_Gondi 0x11D92, # .. 0x11D92 ; Unknown 0x11D93, # .. 0x11D98 ; Gunjala_Gondi 0x11D99, # .. 0x11D9F ; Unknown 0x11DA0, # .. 0x11DA9 ; Gunjala_Gondi 0x11DAA, # .. 0x11EDF ; Unknown 0x11EE0, # .. 
0x11EF8 ; Makasar 0x11EF9, # .. 0x11EFF ; Unknown 0x11F00, # .. 0x11F10 ; Kawi 0x11F11, # .. 0x11F11 ; Unknown 0x11F12, # .. 0x11F3A ; Kawi 0x11F3B, # .. 0x11F3D ; Unknown 0x11F3E, # .. 0x11F59 ; Kawi 0x11F5A, # .. 0x11FAF ; Unknown 0x11FB0, # .. 0x11FB0 ; Lisu 0x11FB1, # .. 0x11FBF ; Unknown 0x11FC0, # .. 0x11FF1 ; Tamil 0x11FF2, # .. 0x11FFE ; Unknown 0x11FFF, # .. 0x11FFF ; Tamil 0x12000, # .. 0x12399 ; Cuneiform 0x1239A, # .. 0x123FF ; Unknown 0x12400, # .. 0x1246E ; Cuneiform 0x1246F, # .. 0x1246F ; Unknown 0x12470, # .. 0x12474 ; Cuneiform 0x12475, # .. 0x1247F ; Unknown 0x12480, # .. 0x12543 ; Cuneiform 0x12544, # .. 0x12F8F ; Unknown 0x12F90, # .. 0x12FF2 ; Cypro_Minoan 0x12FF3, # .. 0x12FFF ; Unknown 0x13000, # .. 0x13455 ; Egyptian_Hieroglyphs 0x13456, # .. 0x143FF ; Unknown 0x14400, # .. 0x14646 ; Anatolian_Hieroglyphs 0x14647, # .. 0x167FF ; Unknown 0x16800, # .. 0x16A38 ; Bamum 0x16A39, # .. 0x16A3F ; Unknown 0x16A40, # .. 0x16A5E ; Mro 0x16A5F, # .. 0x16A5F ; Unknown 0x16A60, # .. 0x16A69 ; Mro 0x16A6A, # .. 0x16A6D ; Unknown 0x16A6E, # .. 0x16A6F ; Mro 0x16A70, # .. 0x16ABE ; Tangsa 0x16ABF, # .. 0x16ABF ; Unknown 0x16AC0, # .. 0x16AC9 ; Tangsa 0x16ACA, # .. 0x16ACF ; Unknown 0x16AD0, # .. 0x16AED ; Bassa_Vah 0x16AEE, # .. 0x16AEF ; Unknown 0x16AF0, # .. 0x16AF5 ; Bassa_Vah 0x16AF6, # .. 0x16AFF ; Unknown 0x16B00, # .. 0x16B45 ; Pahawh_Hmong 0x16B46, # .. 0x16B4F ; Unknown 0x16B50, # .. 0x16B59 ; Pahawh_Hmong 0x16B5A, # .. 0x16B5A ; Unknown 0x16B5B, # .. 0x16B61 ; Pahawh_Hmong 0x16B62, # .. 0x16B62 ; Unknown 0x16B63, # .. 0x16B77 ; Pahawh_Hmong 0x16B78, # .. 0x16B7C ; Unknown 0x16B7D, # .. 0x16B8F ; Pahawh_Hmong 0x16B90, # .. 0x16E3F ; Unknown 0x16E40, # .. 0x16E9A ; Medefaidrin 0x16E9B, # .. 0x16EFF ; Unknown 0x16F00, # .. 0x16F4A ; Miao 0x16F4B, # .. 0x16F4E ; Unknown 0x16F4F, # .. 0x16F87 ; Miao 0x16F88, # .. 0x16F8E ; Unknown 0x16F8F, # .. 0x16F9F ; Miao 0x16FA0, # .. 0x16FDF ; Unknown 0x16FE0, # .. 0x16FE0 ; Tangut 0x16FE1, # .. 0x16FE1 ; Nushu 0x16FE2, # .. 0x16FE3 ; Han 0x16FE4, # .. 0x16FE4 ; Khitan_Small_Script 0x16FE5, # .. 0x16FEF ; Unknown 0x16FF0, # .. 0x16FF1 ; Han 0x16FF2, # .. 0x16FFF ; Unknown 0x17000, # .. 0x187F7 ; Tangut 0x187F8, # .. 0x187FF ; Unknown 0x18800, # .. 0x18AFF ; Tangut 0x18B00, # .. 0x18CD5 ; Khitan_Small_Script 0x18CD6, # .. 0x18CFF ; Unknown 0x18D00, # .. 0x18D08 ; Tangut 0x18D09, # .. 0x1AFEF ; Unknown 0x1AFF0, # .. 0x1AFF3 ; Katakana 0x1AFF4, # .. 0x1AFF4 ; Unknown 0x1AFF5, # .. 0x1AFFB ; Katakana 0x1AFFC, # .. 0x1AFFC ; Unknown 0x1AFFD, # .. 0x1AFFE ; Katakana 0x1AFFF, # .. 0x1AFFF ; Unknown 0x1B000, # .. 0x1B000 ; Katakana 0x1B001, # .. 0x1B11F ; Hiragana 0x1B120, # .. 0x1B122 ; Katakana 0x1B123, # .. 0x1B131 ; Unknown 0x1B132, # .. 0x1B132 ; Hiragana 0x1B133, # .. 0x1B14F ; Unknown 0x1B150, # .. 0x1B152 ; Hiragana 0x1B153, # .. 0x1B154 ; Unknown 0x1B155, # .. 0x1B155 ; Katakana 0x1B156, # .. 0x1B163 ; Unknown 0x1B164, # .. 0x1B167 ; Katakana 0x1B168, # .. 0x1B16F ; Unknown 0x1B170, # .. 0x1B2FB ; Nushu 0x1B2FC, # .. 0x1BBFF ; Unknown 0x1BC00, # .. 0x1BC6A ; Duployan 0x1BC6B, # .. 0x1BC6F ; Unknown 0x1BC70, # .. 0x1BC7C ; Duployan 0x1BC7D, # .. 0x1BC7F ; Unknown 0x1BC80, # .. 0x1BC88 ; Duployan 0x1BC89, # .. 0x1BC8F ; Unknown 0x1BC90, # .. 0x1BC99 ; Duployan 0x1BC9A, # .. 0x1BC9B ; Unknown 0x1BC9C, # .. 0x1BC9F ; Duployan 0x1BCA0, # .. 0x1BCA3 ; Common 0x1BCA4, # .. 0x1CEFF ; Unknown 0x1CF00, # .. 0x1CF2D ; Inherited 0x1CF2E, # .. 0x1CF2F ; Unknown 0x1CF30, # .. 0x1CF46 ; Inherited 0x1CF47, # .. 0x1CF4F ; Unknown 0x1CF50, # .. 
0x1CFC3 ; Common 0x1CFC4, # .. 0x1CFFF ; Unknown 0x1D000, # .. 0x1D0F5 ; Common 0x1D0F6, # .. 0x1D0FF ; Unknown 0x1D100, # .. 0x1D126 ; Common 0x1D127, # .. 0x1D128 ; Unknown 0x1D129, # .. 0x1D166 ; Common 0x1D167, # .. 0x1D169 ; Inherited 0x1D16A, # .. 0x1D17A ; Common 0x1D17B, # .. 0x1D182 ; Inherited 0x1D183, # .. 0x1D184 ; Common 0x1D185, # .. 0x1D18B ; Inherited 0x1D18C, # .. 0x1D1A9 ; Common 0x1D1AA, # .. 0x1D1AD ; Inherited 0x1D1AE, # .. 0x1D1EA ; Common 0x1D1EB, # .. 0x1D1FF ; Unknown 0x1D200, # .. 0x1D245 ; Greek 0x1D246, # .. 0x1D2BF ; Unknown 0x1D2C0, # .. 0x1D2D3 ; Common 0x1D2D4, # .. 0x1D2DF ; Unknown 0x1D2E0, # .. 0x1D2F3 ; Common 0x1D2F4, # .. 0x1D2FF ; Unknown 0x1D300, # .. 0x1D356 ; Common 0x1D357, # .. 0x1D35F ; Unknown 0x1D360, # .. 0x1D378 ; Common 0x1D379, # .. 0x1D3FF ; Unknown 0x1D400, # .. 0x1D454 ; Common 0x1D455, # .. 0x1D455 ; Unknown 0x1D456, # .. 0x1D49C ; Common 0x1D49D, # .. 0x1D49D ; Unknown 0x1D49E, # .. 0x1D49F ; Common 0x1D4A0, # .. 0x1D4A1 ; Unknown 0x1D4A2, # .. 0x1D4A2 ; Common 0x1D4A3, # .. 0x1D4A4 ; Unknown 0x1D4A5, # .. 0x1D4A6 ; Common 0x1D4A7, # .. 0x1D4A8 ; Unknown 0x1D4A9, # .. 0x1D4AC ; Common 0x1D4AD, # .. 0x1D4AD ; Unknown 0x1D4AE, # .. 0x1D4B9 ; Common 0x1D4BA, # .. 0x1D4BA ; Unknown 0x1D4BB, # .. 0x1D4BB ; Common 0x1D4BC, # .. 0x1D4BC ; Unknown 0x1D4BD, # .. 0x1D4C3 ; Common 0x1D4C4, # .. 0x1D4C4 ; Unknown 0x1D4C5, # .. 0x1D505 ; Common 0x1D506, # .. 0x1D506 ; Unknown 0x1D507, # .. 0x1D50A ; Common 0x1D50B, # .. 0x1D50C ; Unknown 0x1D50D, # .. 0x1D514 ; Common 0x1D515, # .. 0x1D515 ; Unknown 0x1D516, # .. 0x1D51C ; Common 0x1D51D, # .. 0x1D51D ; Unknown 0x1D51E, # .. 0x1D539 ; Common 0x1D53A, # .. 0x1D53A ; Unknown 0x1D53B, # .. 0x1D53E ; Common 0x1D53F, # .. 0x1D53F ; Unknown 0x1D540, # .. 0x1D544 ; Common 0x1D545, # .. 0x1D545 ; Unknown 0x1D546, # .. 0x1D546 ; Common 0x1D547, # .. 0x1D549 ; Unknown 0x1D54A, # .. 0x1D550 ; Common 0x1D551, # .. 0x1D551 ; Unknown 0x1D552, # .. 0x1D6A5 ; Common 0x1D6A6, # .. 0x1D6A7 ; Unknown 0x1D6A8, # .. 0x1D7CB ; Common 0x1D7CC, # .. 0x1D7CD ; Unknown 0x1D7CE, # .. 0x1D7FF ; Common 0x1D800, # .. 0x1DA8B ; SignWriting 0x1DA8C, # .. 0x1DA9A ; Unknown 0x1DA9B, # .. 0x1DA9F ; SignWriting 0x1DAA0, # .. 0x1DAA0 ; Unknown 0x1DAA1, # .. 0x1DAAF ; SignWriting 0x1DAB0, # .. 0x1DEFF ; Unknown 0x1DF00, # .. 0x1DF1E ; Latin 0x1DF1F, # .. 0x1DF24 ; Unknown 0x1DF25, # .. 0x1DF2A ; Latin 0x1DF2B, # .. 0x1DFFF ; Unknown 0x1E000, # .. 0x1E006 ; Glagolitic 0x1E007, # .. 0x1E007 ; Unknown 0x1E008, # .. 0x1E018 ; Glagolitic 0x1E019, # .. 0x1E01A ; Unknown 0x1E01B, # .. 0x1E021 ; Glagolitic 0x1E022, # .. 0x1E022 ; Unknown 0x1E023, # .. 0x1E024 ; Glagolitic 0x1E025, # .. 0x1E025 ; Unknown 0x1E026, # .. 0x1E02A ; Glagolitic 0x1E02B, # .. 0x1E02F ; Unknown 0x1E030, # .. 0x1E06D ; Cyrillic 0x1E06E, # .. 0x1E08E ; Unknown 0x1E08F, # .. 0x1E08F ; Cyrillic 0x1E090, # .. 0x1E0FF ; Unknown 0x1E100, # .. 0x1E12C ; Nyiakeng_Puachue_Hmong 0x1E12D, # .. 0x1E12F ; Unknown 0x1E130, # .. 0x1E13D ; Nyiakeng_Puachue_Hmong 0x1E13E, # .. 0x1E13F ; Unknown 0x1E140, # .. 0x1E149 ; Nyiakeng_Puachue_Hmong 0x1E14A, # .. 0x1E14D ; Unknown 0x1E14E, # .. 0x1E14F ; Nyiakeng_Puachue_Hmong 0x1E150, # .. 0x1E28F ; Unknown 0x1E290, # .. 0x1E2AE ; Toto 0x1E2AF, # .. 0x1E2BF ; Unknown 0x1E2C0, # .. 0x1E2F9 ; Wancho 0x1E2FA, # .. 0x1E2FE ; Unknown 0x1E2FF, # .. 0x1E2FF ; Wancho 0x1E300, # .. 0x1E4CF ; Unknown 0x1E4D0, # .. 0x1E4F9 ; Nag_Mundari 0x1E4FA, # .. 0x1E7DF ; Unknown 0x1E7E0, # .. 0x1E7E6 ; Ethiopic 0x1E7E7, # .. 0x1E7E7 ; Unknown 0x1E7E8, # .. 
0x1E7EB ; Ethiopic 0x1E7EC, # .. 0x1E7EC ; Unknown 0x1E7ED, # .. 0x1E7EE ; Ethiopic 0x1E7EF, # .. 0x1E7EF ; Unknown 0x1E7F0, # .. 0x1E7FE ; Ethiopic 0x1E7FF, # .. 0x1E7FF ; Unknown 0x1E800, # .. 0x1E8C4 ; Mende_Kikakui 0x1E8C5, # .. 0x1E8C6 ; Unknown 0x1E8C7, # .. 0x1E8D6 ; Mende_Kikakui 0x1E8D7, # .. 0x1E8FF ; Unknown 0x1E900, # .. 0x1E94B ; Adlam 0x1E94C, # .. 0x1E94F ; Unknown 0x1E950, # .. 0x1E959 ; Adlam 0x1E95A, # .. 0x1E95D ; Unknown 0x1E95E, # .. 0x1E95F ; Adlam 0x1E960, # .. 0x1EC70 ; Unknown 0x1EC71, # .. 0x1ECB4 ; Common 0x1ECB5, # .. 0x1ED00 ; Unknown 0x1ED01, # .. 0x1ED3D ; Common 0x1ED3E, # .. 0x1EDFF ; Unknown 0x1EE00, # .. 0x1EE03 ; Arabic 0x1EE04, # .. 0x1EE04 ; Unknown 0x1EE05, # .. 0x1EE1F ; Arabic 0x1EE20, # .. 0x1EE20 ; Unknown 0x1EE21, # .. 0x1EE22 ; Arabic 0x1EE23, # .. 0x1EE23 ; Unknown 0x1EE24, # .. 0x1EE24 ; Arabic 0x1EE25, # .. 0x1EE26 ; Unknown 0x1EE27, # .. 0x1EE27 ; Arabic 0x1EE28, # .. 0x1EE28 ; Unknown 0x1EE29, # .. 0x1EE32 ; Arabic 0x1EE33, # .. 0x1EE33 ; Unknown 0x1EE34, # .. 0x1EE37 ; Arabic 0x1EE38, # .. 0x1EE38 ; Unknown 0x1EE39, # .. 0x1EE39 ; Arabic 0x1EE3A, # .. 0x1EE3A ; Unknown 0x1EE3B, # .. 0x1EE3B ; Arabic 0x1EE3C, # .. 0x1EE41 ; Unknown 0x1EE42, # .. 0x1EE42 ; Arabic 0x1EE43, # .. 0x1EE46 ; Unknown 0x1EE47, # .. 0x1EE47 ; Arabic 0x1EE48, # .. 0x1EE48 ; Unknown 0x1EE49, # .. 0x1EE49 ; Arabic 0x1EE4A, # .. 0x1EE4A ; Unknown 0x1EE4B, # .. 0x1EE4B ; Arabic 0x1EE4C, # .. 0x1EE4C ; Unknown 0x1EE4D, # .. 0x1EE4F ; Arabic 0x1EE50, # .. 0x1EE50 ; Unknown 0x1EE51, # .. 0x1EE52 ; Arabic 0x1EE53, # .. 0x1EE53 ; Unknown 0x1EE54, # .. 0x1EE54 ; Arabic 0x1EE55, # .. 0x1EE56 ; Unknown 0x1EE57, # .. 0x1EE57 ; Arabic 0x1EE58, # .. 0x1EE58 ; Unknown 0x1EE59, # .. 0x1EE59 ; Arabic 0x1EE5A, # .. 0x1EE5A ; Unknown 0x1EE5B, # .. 0x1EE5B ; Arabic 0x1EE5C, # .. 0x1EE5C ; Unknown 0x1EE5D, # .. 0x1EE5D ; Arabic 0x1EE5E, # .. 0x1EE5E ; Unknown 0x1EE5F, # .. 0x1EE5F ; Arabic 0x1EE60, # .. 0x1EE60 ; Unknown 0x1EE61, # .. 0x1EE62 ; Arabic 0x1EE63, # .. 0x1EE63 ; Unknown 0x1EE64, # .. 0x1EE64 ; Arabic 0x1EE65, # .. 0x1EE66 ; Unknown 0x1EE67, # .. 0x1EE6A ; Arabic 0x1EE6B, # .. 0x1EE6B ; Unknown 0x1EE6C, # .. 0x1EE72 ; Arabic 0x1EE73, # .. 0x1EE73 ; Unknown 0x1EE74, # .. 0x1EE77 ; Arabic 0x1EE78, # .. 0x1EE78 ; Unknown 0x1EE79, # .. 0x1EE7C ; Arabic 0x1EE7D, # .. 0x1EE7D ; Unknown 0x1EE7E, # .. 0x1EE7E ; Arabic 0x1EE7F, # .. 0x1EE7F ; Unknown 0x1EE80, # .. 0x1EE89 ; Arabic 0x1EE8A, # .. 0x1EE8A ; Unknown 0x1EE8B, # .. 0x1EE9B ; Arabic 0x1EE9C, # .. 0x1EEA0 ; Unknown 0x1EEA1, # .. 0x1EEA3 ; Arabic 0x1EEA4, # .. 0x1EEA4 ; Unknown 0x1EEA5, # .. 0x1EEA9 ; Arabic 0x1EEAA, # .. 0x1EEAA ; Unknown 0x1EEAB, # .. 0x1EEBB ; Arabic 0x1EEBC, # .. 0x1EEEF ; Unknown 0x1EEF0, # .. 0x1EEF1 ; Arabic 0x1EEF2, # .. 0x1EFFF ; Unknown 0x1F000, # .. 0x1F02B ; Common 0x1F02C, # .. 0x1F02F ; Unknown 0x1F030, # .. 0x1F093 ; Common 0x1F094, # .. 0x1F09F ; Unknown 0x1F0A0, # .. 0x1F0AE ; Common 0x1F0AF, # .. 0x1F0B0 ; Unknown 0x1F0B1, # .. 0x1F0BF ; Common 0x1F0C0, # .. 0x1F0C0 ; Unknown 0x1F0C1, # .. 0x1F0CF ; Common 0x1F0D0, # .. 0x1F0D0 ; Unknown 0x1F0D1, # .. 0x1F0F5 ; Common 0x1F0F6, # .. 0x1F0FF ; Unknown 0x1F100, # .. 0x1F1AD ; Common 0x1F1AE, # .. 0x1F1E5 ; Unknown 0x1F1E6, # .. 0x1F1FF ; Common 0x1F200, # .. 0x1F200 ; Hiragana 0x1F201, # .. 0x1F202 ; Common 0x1F203, # .. 0x1F20F ; Unknown 0x1F210, # .. 0x1F23B ; Common 0x1F23C, # .. 0x1F23F ; Unknown 0x1F240, # .. 0x1F248 ; Common 0x1F249, # .. 0x1F24F ; Unknown 0x1F250, # .. 0x1F251 ; Common 0x1F252, # .. 0x1F25F ; Unknown 0x1F260, # .. 
0x1F265 ; Common 0x1F266, # .. 0x1F2FF ; Unknown 0x1F300, # .. 0x1F6D7 ; Common 0x1F6D8, # .. 0x1F6DB ; Unknown 0x1F6DC, # .. 0x1F6EC ; Common 0x1F6ED, # .. 0x1F6EF ; Unknown 0x1F6F0, # .. 0x1F6FC ; Common 0x1F6FD, # .. 0x1F6FF ; Unknown 0x1F700, # .. 0x1F776 ; Common 0x1F777, # .. 0x1F77A ; Unknown 0x1F77B, # .. 0x1F7D9 ; Common 0x1F7DA, # .. 0x1F7DF ; Unknown 0x1F7E0, # .. 0x1F7EB ; Common 0x1F7EC, # .. 0x1F7EF ; Unknown 0x1F7F0, # .. 0x1F7F0 ; Common 0x1F7F1, # .. 0x1F7FF ; Unknown 0x1F800, # .. 0x1F80B ; Common 0x1F80C, # .. 0x1F80F ; Unknown 0x1F810, # .. 0x1F847 ; Common 0x1F848, # .. 0x1F84F ; Unknown 0x1F850, # .. 0x1F859 ; Common 0x1F85A, # .. 0x1F85F ; Unknown 0x1F860, # .. 0x1F887 ; Common 0x1F888, # .. 0x1F88F ; Unknown 0x1F890, # .. 0x1F8AD ; Common 0x1F8AE, # .. 0x1F8AF ; Unknown 0x1F8B0, # .. 0x1F8B1 ; Common 0x1F8B2, # .. 0x1F8FF ; Unknown 0x1F900, # .. 0x1FA53 ; Common 0x1FA54, # .. 0x1FA5F ; Unknown 0x1FA60, # .. 0x1FA6D ; Common 0x1FA6E, # .. 0x1FA6F ; Unknown 0x1FA70, # .. 0x1FA7C ; Common 0x1FA7D, # .. 0x1FA7F ; Unknown 0x1FA80, # .. 0x1FA88 ; Common 0x1FA89, # .. 0x1FA8F ; Unknown 0x1FA90, # .. 0x1FABD ; Common 0x1FABE, # .. 0x1FABE ; Unknown 0x1FABF, # .. 0x1FAC5 ; Common 0x1FAC6, # .. 0x1FACD ; Unknown 0x1FACE, # .. 0x1FADB ; Common 0x1FADC, # .. 0x1FADF ; Unknown 0x1FAE0, # .. 0x1FAE8 ; Common 0x1FAE9, # .. 0x1FAEF ; Unknown 0x1FAF0, # .. 0x1FAF8 ; Common 0x1FAF9, # .. 0x1FAFF ; Unknown 0x1FB00, # .. 0x1FB92 ; Common 0x1FB93, # .. 0x1FB93 ; Unknown 0x1FB94, # .. 0x1FBCA ; Common 0x1FBCB, # .. 0x1FBEF ; Unknown 0x1FBF0, # .. 0x1FBF9 ; Common 0x1FBFA, # .. 0x1FFFF ; Unknown 0x20000, # .. 0x2A6DF ; Han 0x2A6E0, # .. 0x2A6FF ; Unknown 0x2A700, # .. 0x2B739 ; Han 0x2B73A, # .. 0x2B73F ; Unknown 0x2B740, # .. 0x2B81D ; Han 0x2B81E, # .. 0x2B81F ; Unknown 0x2B820, # .. 0x2CEA1 ; Han 0x2CEA2, # .. 0x2CEAF ; Unknown 0x2CEB0, # .. 0x2EBE0 ; Han 0x2EBE1, # .. 0x2F7FF ; Unknown 0x2F800, # .. 0x2FA1D ; Han 0x2FA1E, # .. 0x2FFFF ; Unknown 0x30000, # .. 0x3134A ; Han 0x3134B, # .. 0x3134F ; Unknown 0x31350, # .. 0x323AF ; Han 0x323B0, # .. 0xE0000 ; Unknown 0xE0001, # .. 0xE0001 ; Common 0xE0002, # .. 0xE001F ; Unknown 0xE0020, # .. 0xE007F ; Common 0xE0080, # .. 0xE00FF ; Unknown 0xE0100, # .. 0xE01EF ; Inherited 0xE01F0, # .. 
0x10FFFF ; Unknown ] VALUES = [ "Zyyy", # 0000..0040 ; Common "Latn", # 0041..005A ; Latin "Zyyy", # 005B..0060 ; Common "Latn", # 0061..007A ; Latin "Zyyy", # 007B..00A9 ; Common "Latn", # 00AA..00AA ; Latin "Zyyy", # 00AB..00B9 ; Common "Latn", # 00BA..00BA ; Latin "Zyyy", # 00BB..00BF ; Common "Latn", # 00C0..00D6 ; Latin "Zyyy", # 00D7..00D7 ; Common "Latn", # 00D8..00F6 ; Latin "Zyyy", # 00F7..00F7 ; Common "Latn", # 00F8..02B8 ; Latin "Zyyy", # 02B9..02DF ; Common "Latn", # 02E0..02E4 ; Latin "Zyyy", # 02E5..02E9 ; Common "Bopo", # 02EA..02EB ; Bopomofo "Zyyy", # 02EC..02FF ; Common "Zinh", # 0300..036F ; Inherited "Grek", # 0370..0373 ; Greek "Zyyy", # 0374..0374 ; Common "Grek", # 0375..0377 ; Greek "Zzzz", # 0378..0379 ; Unknown "Grek", # 037A..037D ; Greek "Zyyy", # 037E..037E ; Common "Grek", # 037F..037F ; Greek "Zzzz", # 0380..0383 ; Unknown "Grek", # 0384..0384 ; Greek "Zyyy", # 0385..0385 ; Common "Grek", # 0386..0386 ; Greek "Zyyy", # 0387..0387 ; Common "Grek", # 0388..038A ; Greek "Zzzz", # 038B..038B ; Unknown "Grek", # 038C..038C ; Greek "Zzzz", # 038D..038D ; Unknown "Grek", # 038E..03A1 ; Greek "Zzzz", # 03A2..03A2 ; Unknown "Grek", # 03A3..03E1 ; Greek "Copt", # 03E2..03EF ; Coptic "Grek", # 03F0..03FF ; Greek "Cyrl", # 0400..0484 ; Cyrillic "Zinh", # 0485..0486 ; Inherited "Cyrl", # 0487..052F ; Cyrillic "Zzzz", # 0530..0530 ; Unknown "Armn", # 0531..0556 ; Armenian "Zzzz", # 0557..0558 ; Unknown "Armn", # 0559..058A ; Armenian "Zzzz", # 058B..058C ; Unknown "Armn", # 058D..058F ; Armenian "Zzzz", # 0590..0590 ; Unknown "Hebr", # 0591..05C7 ; Hebrew "Zzzz", # 05C8..05CF ; Unknown "Hebr", # 05D0..05EA ; Hebrew "Zzzz", # 05EB..05EE ; Unknown "Hebr", # 05EF..05F4 ; Hebrew "Zzzz", # 05F5..05FF ; Unknown "Arab", # 0600..0604 ; Arabic "Zyyy", # 0605..0605 ; Common "Arab", # 0606..060B ; Arabic "Zyyy", # 060C..060C ; Common "Arab", # 060D..061A ; Arabic "Zyyy", # 061B..061B ; Common "Arab", # 061C..061E ; Arabic "Zyyy", # 061F..061F ; Common "Arab", # 0620..063F ; Arabic "Zyyy", # 0640..0640 ; Common "Arab", # 0641..064A ; Arabic "Zinh", # 064B..0655 ; Inherited "Arab", # 0656..066F ; Arabic "Zinh", # 0670..0670 ; Inherited "Arab", # 0671..06DC ; Arabic "Zyyy", # 06DD..06DD ; Common "Arab", # 06DE..06FF ; Arabic "Syrc", # 0700..070D ; Syriac "Zzzz", # 070E..070E ; Unknown "Syrc", # 070F..074A ; Syriac "Zzzz", # 074B..074C ; Unknown "Syrc", # 074D..074F ; Syriac "Arab", # 0750..077F ; Arabic "Thaa", # 0780..07B1 ; Thaana "Zzzz", # 07B2..07BF ; Unknown "Nkoo", # 07C0..07FA ; Nko "Zzzz", # 07FB..07FC ; Unknown "Nkoo", # 07FD..07FF ; Nko "Samr", # 0800..082D ; Samaritan "Zzzz", # 082E..082F ; Unknown "Samr", # 0830..083E ; Samaritan "Zzzz", # 083F..083F ; Unknown "Mand", # 0840..085B ; Mandaic "Zzzz", # 085C..085D ; Unknown "Mand", # 085E..085E ; Mandaic "Zzzz", # 085F..085F ; Unknown "Syrc", # 0860..086A ; Syriac "Zzzz", # 086B..086F ; Unknown "Arab", # 0870..088E ; Arabic "Zzzz", # 088F..088F ; Unknown "Arab", # 0890..0891 ; Arabic "Zzzz", # 0892..0897 ; Unknown "Arab", # 0898..08E1 ; Arabic "Zyyy", # 08E2..08E2 ; Common "Arab", # 08E3..08FF ; Arabic "Deva", # 0900..0950 ; Devanagari "Zinh", # 0951..0954 ; Inherited "Deva", # 0955..0963 ; Devanagari "Zyyy", # 0964..0965 ; Common "Deva", # 0966..097F ; Devanagari "Beng", # 0980..0983 ; Bengali "Zzzz", # 0984..0984 ; Unknown "Beng", # 0985..098C ; Bengali "Zzzz", # 098D..098E ; Unknown "Beng", # 098F..0990 ; Bengali "Zzzz", # 0991..0992 ; Unknown "Beng", # 0993..09A8 ; Bengali "Zzzz", # 09A9..09A9 ; Unknown "Beng", # 
09AA..09B0 ; Bengali "Zzzz", # 09B1..09B1 ; Unknown "Beng", # 09B2..09B2 ; Bengali "Zzzz", # 09B3..09B5 ; Unknown "Beng", # 09B6..09B9 ; Bengali "Zzzz", # 09BA..09BB ; Unknown "Beng", # 09BC..09C4 ; Bengali "Zzzz", # 09C5..09C6 ; Unknown "Beng", # 09C7..09C8 ; Bengali "Zzzz", # 09C9..09CA ; Unknown "Beng", # 09CB..09CE ; Bengali "Zzzz", # 09CF..09D6 ; Unknown "Beng", # 09D7..09D7 ; Bengali "Zzzz", # 09D8..09DB ; Unknown "Beng", # 09DC..09DD ; Bengali "Zzzz", # 09DE..09DE ; Unknown "Beng", # 09DF..09E3 ; Bengali "Zzzz", # 09E4..09E5 ; Unknown "Beng", # 09E6..09FE ; Bengali "Zzzz", # 09FF..0A00 ; Unknown "Guru", # 0A01..0A03 ; Gurmukhi "Zzzz", # 0A04..0A04 ; Unknown "Guru", # 0A05..0A0A ; Gurmukhi "Zzzz", # 0A0B..0A0E ; Unknown "Guru", # 0A0F..0A10 ; Gurmukhi "Zzzz", # 0A11..0A12 ; Unknown "Guru", # 0A13..0A28 ; Gurmukhi "Zzzz", # 0A29..0A29 ; Unknown "Guru", # 0A2A..0A30 ; Gurmukhi "Zzzz", # 0A31..0A31 ; Unknown "Guru", # 0A32..0A33 ; Gurmukhi "Zzzz", # 0A34..0A34 ; Unknown "Guru", # 0A35..0A36 ; Gurmukhi "Zzzz", # 0A37..0A37 ; Unknown "Guru", # 0A38..0A39 ; Gurmukhi "Zzzz", # 0A3A..0A3B ; Unknown "Guru", # 0A3C..0A3C ; Gurmukhi "Zzzz", # 0A3D..0A3D ; Unknown "Guru", # 0A3E..0A42 ; Gurmukhi "Zzzz", # 0A43..0A46 ; Unknown "Guru", # 0A47..0A48 ; Gurmukhi "Zzzz", # 0A49..0A4A ; Unknown "Guru", # 0A4B..0A4D ; Gurmukhi "Zzzz", # 0A4E..0A50 ; Unknown "Guru", # 0A51..0A51 ; Gurmukhi "Zzzz", # 0A52..0A58 ; Unknown "Guru", # 0A59..0A5C ; Gurmukhi "Zzzz", # 0A5D..0A5D ; Unknown "Guru", # 0A5E..0A5E ; Gurmukhi "Zzzz", # 0A5F..0A65 ; Unknown "Guru", # 0A66..0A76 ; Gurmukhi "Zzzz", # 0A77..0A80 ; Unknown "Gujr", # 0A81..0A83 ; Gujarati "Zzzz", # 0A84..0A84 ; Unknown "Gujr", # 0A85..0A8D ; Gujarati "Zzzz", # 0A8E..0A8E ; Unknown "Gujr", # 0A8F..0A91 ; Gujarati "Zzzz", # 0A92..0A92 ; Unknown "Gujr", # 0A93..0AA8 ; Gujarati "Zzzz", # 0AA9..0AA9 ; Unknown "Gujr", # 0AAA..0AB0 ; Gujarati "Zzzz", # 0AB1..0AB1 ; Unknown "Gujr", # 0AB2..0AB3 ; Gujarati "Zzzz", # 0AB4..0AB4 ; Unknown "Gujr", # 0AB5..0AB9 ; Gujarati "Zzzz", # 0ABA..0ABB ; Unknown "Gujr", # 0ABC..0AC5 ; Gujarati "Zzzz", # 0AC6..0AC6 ; Unknown "Gujr", # 0AC7..0AC9 ; Gujarati "Zzzz", # 0ACA..0ACA ; Unknown "Gujr", # 0ACB..0ACD ; Gujarati "Zzzz", # 0ACE..0ACF ; Unknown "Gujr", # 0AD0..0AD0 ; Gujarati "Zzzz", # 0AD1..0ADF ; Unknown "Gujr", # 0AE0..0AE3 ; Gujarati "Zzzz", # 0AE4..0AE5 ; Unknown "Gujr", # 0AE6..0AF1 ; Gujarati "Zzzz", # 0AF2..0AF8 ; Unknown "Gujr", # 0AF9..0AFF ; Gujarati "Zzzz", # 0B00..0B00 ; Unknown "Orya", # 0B01..0B03 ; Oriya "Zzzz", # 0B04..0B04 ; Unknown "Orya", # 0B05..0B0C ; Oriya "Zzzz", # 0B0D..0B0E ; Unknown "Orya", # 0B0F..0B10 ; Oriya "Zzzz", # 0B11..0B12 ; Unknown "Orya", # 0B13..0B28 ; Oriya "Zzzz", # 0B29..0B29 ; Unknown "Orya", # 0B2A..0B30 ; Oriya "Zzzz", # 0B31..0B31 ; Unknown "Orya", # 0B32..0B33 ; Oriya "Zzzz", # 0B34..0B34 ; Unknown "Orya", # 0B35..0B39 ; Oriya "Zzzz", # 0B3A..0B3B ; Unknown "Orya", # 0B3C..0B44 ; Oriya "Zzzz", # 0B45..0B46 ; Unknown "Orya", # 0B47..0B48 ; Oriya "Zzzz", # 0B49..0B4A ; Unknown "Orya", # 0B4B..0B4D ; Oriya "Zzzz", # 0B4E..0B54 ; Unknown "Orya", # 0B55..0B57 ; Oriya "Zzzz", # 0B58..0B5B ; Unknown "Orya", # 0B5C..0B5D ; Oriya "Zzzz", # 0B5E..0B5E ; Unknown "Orya", # 0B5F..0B63 ; Oriya "Zzzz", # 0B64..0B65 ; Unknown "Orya", # 0B66..0B77 ; Oriya "Zzzz", # 0B78..0B81 ; Unknown "Taml", # 0B82..0B83 ; Tamil "Zzzz", # 0B84..0B84 ; Unknown "Taml", # 0B85..0B8A ; Tamil "Zzzz", # 0B8B..0B8D ; Unknown "Taml", # 0B8E..0B90 ; Tamil "Zzzz", # 0B91..0B91 ; Unknown "Taml", # 0B92..0B95 ; Tamil 
"Zzzz", # 0B96..0B98 ; Unknown "Taml", # 0B99..0B9A ; Tamil "Zzzz", # 0B9B..0B9B ; Unknown "Taml", # 0B9C..0B9C ; Tamil "Zzzz", # 0B9D..0B9D ; Unknown "Taml", # 0B9E..0B9F ; Tamil "Zzzz", # 0BA0..0BA2 ; Unknown "Taml", # 0BA3..0BA4 ; Tamil "Zzzz", # 0BA5..0BA7 ; Unknown "Taml", # 0BA8..0BAA ; Tamil "Zzzz", # 0BAB..0BAD ; Unknown "Taml", # 0BAE..0BB9 ; Tamil "Zzzz", # 0BBA..0BBD ; Unknown "Taml", # 0BBE..0BC2 ; Tamil "Zzzz", # 0BC3..0BC5 ; Unknown "Taml", # 0BC6..0BC8 ; Tamil "Zzzz", # 0BC9..0BC9 ; Unknown "Taml", # 0BCA..0BCD ; Tamil "Zzzz", # 0BCE..0BCF ; Unknown "Taml", # 0BD0..0BD0 ; Tamil "Zzzz", # 0BD1..0BD6 ; Unknown "Taml", # 0BD7..0BD7 ; Tamil "Zzzz", # 0BD8..0BE5 ; Unknown "Taml", # 0BE6..0BFA ; Tamil "Zzzz", # 0BFB..0BFF ; Unknown "Telu", # 0C00..0C0C ; Telugu "Zzzz", # 0C0D..0C0D ; Unknown "Telu", # 0C0E..0C10 ; Telugu "Zzzz", # 0C11..0C11 ; Unknown "Telu", # 0C12..0C28 ; Telugu "Zzzz", # 0C29..0C29 ; Unknown "Telu", # 0C2A..0C39 ; Telugu "Zzzz", # 0C3A..0C3B ; Unknown "Telu", # 0C3C..0C44 ; Telugu "Zzzz", # 0C45..0C45 ; Unknown "Telu", # 0C46..0C48 ; Telugu "Zzzz", # 0C49..0C49 ; Unknown "Telu", # 0C4A..0C4D ; Telugu "Zzzz", # 0C4E..0C54 ; Unknown "Telu", # 0C55..0C56 ; Telugu "Zzzz", # 0C57..0C57 ; Unknown "Telu", # 0C58..0C5A ; Telugu "Zzzz", # 0C5B..0C5C ; Unknown "Telu", # 0C5D..0C5D ; Telugu "Zzzz", # 0C5E..0C5F ; Unknown "Telu", # 0C60..0C63 ; Telugu "Zzzz", # 0C64..0C65 ; Unknown "Telu", # 0C66..0C6F ; Telugu "Zzzz", # 0C70..0C76 ; Unknown "Telu", # 0C77..0C7F ; Telugu "Knda", # 0C80..0C8C ; Kannada "Zzzz", # 0C8D..0C8D ; Unknown "Knda", # 0C8E..0C90 ; Kannada "Zzzz", # 0C91..0C91 ; Unknown "Knda", # 0C92..0CA8 ; Kannada "Zzzz", # 0CA9..0CA9 ; Unknown "Knda", # 0CAA..0CB3 ; Kannada "Zzzz", # 0CB4..0CB4 ; Unknown "Knda", # 0CB5..0CB9 ; Kannada "Zzzz", # 0CBA..0CBB ; Unknown "Knda", # 0CBC..0CC4 ; Kannada "Zzzz", # 0CC5..0CC5 ; Unknown "Knda", # 0CC6..0CC8 ; Kannada "Zzzz", # 0CC9..0CC9 ; Unknown "Knda", # 0CCA..0CCD ; Kannada "Zzzz", # 0CCE..0CD4 ; Unknown "Knda", # 0CD5..0CD6 ; Kannada "Zzzz", # 0CD7..0CDC ; Unknown "Knda", # 0CDD..0CDE ; Kannada "Zzzz", # 0CDF..0CDF ; Unknown "Knda", # 0CE0..0CE3 ; Kannada "Zzzz", # 0CE4..0CE5 ; Unknown "Knda", # 0CE6..0CEF ; Kannada "Zzzz", # 0CF0..0CF0 ; Unknown "Knda", # 0CF1..0CF3 ; Kannada "Zzzz", # 0CF4..0CFF ; Unknown "Mlym", # 0D00..0D0C ; Malayalam "Zzzz", # 0D0D..0D0D ; Unknown "Mlym", # 0D0E..0D10 ; Malayalam "Zzzz", # 0D11..0D11 ; Unknown "Mlym", # 0D12..0D44 ; Malayalam "Zzzz", # 0D45..0D45 ; Unknown "Mlym", # 0D46..0D48 ; Malayalam "Zzzz", # 0D49..0D49 ; Unknown "Mlym", # 0D4A..0D4F ; Malayalam "Zzzz", # 0D50..0D53 ; Unknown "Mlym", # 0D54..0D63 ; Malayalam "Zzzz", # 0D64..0D65 ; Unknown "Mlym", # 0D66..0D7F ; Malayalam "Zzzz", # 0D80..0D80 ; Unknown "Sinh", # 0D81..0D83 ; Sinhala "Zzzz", # 0D84..0D84 ; Unknown "Sinh", # 0D85..0D96 ; Sinhala "Zzzz", # 0D97..0D99 ; Unknown "Sinh", # 0D9A..0DB1 ; Sinhala "Zzzz", # 0DB2..0DB2 ; Unknown "Sinh", # 0DB3..0DBB ; Sinhala "Zzzz", # 0DBC..0DBC ; Unknown "Sinh", # 0DBD..0DBD ; Sinhala "Zzzz", # 0DBE..0DBF ; Unknown "Sinh", # 0DC0..0DC6 ; Sinhala "Zzzz", # 0DC7..0DC9 ; Unknown "Sinh", # 0DCA..0DCA ; Sinhala "Zzzz", # 0DCB..0DCE ; Unknown "Sinh", # 0DCF..0DD4 ; Sinhala "Zzzz", # 0DD5..0DD5 ; Unknown "Sinh", # 0DD6..0DD6 ; Sinhala "Zzzz", # 0DD7..0DD7 ; Unknown "Sinh", # 0DD8..0DDF ; Sinhala "Zzzz", # 0DE0..0DE5 ; Unknown "Sinh", # 0DE6..0DEF ; Sinhala "Zzzz", # 0DF0..0DF1 ; Unknown "Sinh", # 0DF2..0DF4 ; Sinhala "Zzzz", # 0DF5..0E00 ; Unknown "Thai", # 0E01..0E3A ; Thai "Zzzz", # 
0E3B..0E3E ; Unknown "Zyyy", # 0E3F..0E3F ; Common "Thai", # 0E40..0E5B ; Thai "Zzzz", # 0E5C..0E80 ; Unknown "Laoo", # 0E81..0E82 ; Lao "Zzzz", # 0E83..0E83 ; Unknown "Laoo", # 0E84..0E84 ; Lao "Zzzz", # 0E85..0E85 ; Unknown "Laoo", # 0E86..0E8A ; Lao "Zzzz", # 0E8B..0E8B ; Unknown "Laoo", # 0E8C..0EA3 ; Lao "Zzzz", # 0EA4..0EA4 ; Unknown "Laoo", # 0EA5..0EA5 ; Lao "Zzzz", # 0EA6..0EA6 ; Unknown "Laoo", # 0EA7..0EBD ; Lao "Zzzz", # 0EBE..0EBF ; Unknown "Laoo", # 0EC0..0EC4 ; Lao "Zzzz", # 0EC5..0EC5 ; Unknown "Laoo", # 0EC6..0EC6 ; Lao "Zzzz", # 0EC7..0EC7 ; Unknown "Laoo", # 0EC8..0ECE ; Lao "Zzzz", # 0ECF..0ECF ; Unknown "Laoo", # 0ED0..0ED9 ; Lao "Zzzz", # 0EDA..0EDB ; Unknown "Laoo", # 0EDC..0EDF ; Lao "Zzzz", # 0EE0..0EFF ; Unknown "Tibt", # 0F00..0F47 ; Tibetan "Zzzz", # 0F48..0F48 ; Unknown "Tibt", # 0F49..0F6C ; Tibetan "Zzzz", # 0F6D..0F70 ; Unknown "Tibt", # 0F71..0F97 ; Tibetan "Zzzz", # 0F98..0F98 ; Unknown "Tibt", # 0F99..0FBC ; Tibetan "Zzzz", # 0FBD..0FBD ; Unknown "Tibt", # 0FBE..0FCC ; Tibetan "Zzzz", # 0FCD..0FCD ; Unknown "Tibt", # 0FCE..0FD4 ; Tibetan "Zyyy", # 0FD5..0FD8 ; Common "Tibt", # 0FD9..0FDA ; Tibetan "Zzzz", # 0FDB..0FFF ; Unknown "Mymr", # 1000..109F ; Myanmar "Geor", # 10A0..10C5 ; Georgian "Zzzz", # 10C6..10C6 ; Unknown "Geor", # 10C7..10C7 ; Georgian "Zzzz", # 10C8..10CC ; Unknown "Geor", # 10CD..10CD ; Georgian "Zzzz", # 10CE..10CF ; Unknown "Geor", # 10D0..10FA ; Georgian "Zyyy", # 10FB..10FB ; Common "Geor", # 10FC..10FF ; Georgian "Hang", # 1100..11FF ; Hangul "Ethi", # 1200..1248 ; Ethiopic "Zzzz", # 1249..1249 ; Unknown "Ethi", # 124A..124D ; Ethiopic "Zzzz", # 124E..124F ; Unknown "Ethi", # 1250..1256 ; Ethiopic "Zzzz", # 1257..1257 ; Unknown "Ethi", # 1258..1258 ; Ethiopic "Zzzz", # 1259..1259 ; Unknown "Ethi", # 125A..125D ; Ethiopic "Zzzz", # 125E..125F ; Unknown "Ethi", # 1260..1288 ; Ethiopic "Zzzz", # 1289..1289 ; Unknown "Ethi", # 128A..128D ; Ethiopic "Zzzz", # 128E..128F ; Unknown "Ethi", # 1290..12B0 ; Ethiopic "Zzzz", # 12B1..12B1 ; Unknown "Ethi", # 12B2..12B5 ; Ethiopic "Zzzz", # 12B6..12B7 ; Unknown "Ethi", # 12B8..12BE ; Ethiopic "Zzzz", # 12BF..12BF ; Unknown "Ethi", # 12C0..12C0 ; Ethiopic "Zzzz", # 12C1..12C1 ; Unknown "Ethi", # 12C2..12C5 ; Ethiopic "Zzzz", # 12C6..12C7 ; Unknown "Ethi", # 12C8..12D6 ; Ethiopic "Zzzz", # 12D7..12D7 ; Unknown "Ethi", # 12D8..1310 ; Ethiopic "Zzzz", # 1311..1311 ; Unknown "Ethi", # 1312..1315 ; Ethiopic "Zzzz", # 1316..1317 ; Unknown "Ethi", # 1318..135A ; Ethiopic "Zzzz", # 135B..135C ; Unknown "Ethi", # 135D..137C ; Ethiopic "Zzzz", # 137D..137F ; Unknown "Ethi", # 1380..1399 ; Ethiopic "Zzzz", # 139A..139F ; Unknown "Cher", # 13A0..13F5 ; Cherokee "Zzzz", # 13F6..13F7 ; Unknown "Cher", # 13F8..13FD ; Cherokee "Zzzz", # 13FE..13FF ; Unknown "Cans", # 1400..167F ; Canadian_Aboriginal "Ogam", # 1680..169C ; Ogham "Zzzz", # 169D..169F ; Unknown "Runr", # 16A0..16EA ; Runic "Zyyy", # 16EB..16ED ; Common "Runr", # 16EE..16F8 ; Runic "Zzzz", # 16F9..16FF ; Unknown "Tglg", # 1700..1715 ; Tagalog "Zzzz", # 1716..171E ; Unknown "Tglg", # 171F..171F ; Tagalog "Hano", # 1720..1734 ; Hanunoo "Zyyy", # 1735..1736 ; Common "Zzzz", # 1737..173F ; Unknown "Buhd", # 1740..1753 ; Buhid "Zzzz", # 1754..175F ; Unknown "Tagb", # 1760..176C ; Tagbanwa "Zzzz", # 176D..176D ; Unknown "Tagb", # 176E..1770 ; Tagbanwa "Zzzz", # 1771..1771 ; Unknown "Tagb", # 1772..1773 ; Tagbanwa "Zzzz", # 1774..177F ; Unknown "Khmr", # 1780..17DD ; Khmer "Zzzz", # 17DE..17DF ; Unknown "Khmr", # 17E0..17E9 ; Khmer "Zzzz", # 17EA..17EF ; 
Unknown "Khmr", # 17F0..17F9 ; Khmer "Zzzz", # 17FA..17FF ; Unknown "Mong", # 1800..1801 ; Mongolian "Zyyy", # 1802..1803 ; Common "Mong", # 1804..1804 ; Mongolian "Zyyy", # 1805..1805 ; Common "Mong", # 1806..1819 ; Mongolian "Zzzz", # 181A..181F ; Unknown "Mong", # 1820..1878 ; Mongolian "Zzzz", # 1879..187F ; Unknown "Mong", # 1880..18AA ; Mongolian "Zzzz", # 18AB..18AF ; Unknown "Cans", # 18B0..18F5 ; Canadian_Aboriginal "Zzzz", # 18F6..18FF ; Unknown "Limb", # 1900..191E ; Limbu "Zzzz", # 191F..191F ; Unknown "Limb", # 1920..192B ; Limbu "Zzzz", # 192C..192F ; Unknown "Limb", # 1930..193B ; Limbu "Zzzz", # 193C..193F ; Unknown "Limb", # 1940..1940 ; Limbu "Zzzz", # 1941..1943 ; Unknown "Limb", # 1944..194F ; Limbu "Tale", # 1950..196D ; Tai_Le "Zzzz", # 196E..196F ; Unknown "Tale", # 1970..1974 ; Tai_Le "Zzzz", # 1975..197F ; Unknown "Talu", # 1980..19AB ; New_Tai_Lue "Zzzz", # 19AC..19AF ; Unknown "Talu", # 19B0..19C9 ; New_Tai_Lue "Zzzz", # 19CA..19CF ; Unknown "Talu", # 19D0..19DA ; New_Tai_Lue "Zzzz", # 19DB..19DD ; Unknown "Talu", # 19DE..19DF ; New_Tai_Lue "Khmr", # 19E0..19FF ; Khmer "Bugi", # 1A00..1A1B ; Buginese "Zzzz", # 1A1C..1A1D ; Unknown "Bugi", # 1A1E..1A1F ; Buginese "Lana", # 1A20..1A5E ; Tai_Tham "Zzzz", # 1A5F..1A5F ; Unknown "Lana", # 1A60..1A7C ; Tai_Tham "Zzzz", # 1A7D..1A7E ; Unknown "Lana", # 1A7F..1A89 ; Tai_Tham "Zzzz", # 1A8A..1A8F ; Unknown "Lana", # 1A90..1A99 ; Tai_Tham "Zzzz", # 1A9A..1A9F ; Unknown "Lana", # 1AA0..1AAD ; Tai_Tham "Zzzz", # 1AAE..1AAF ; Unknown "Zinh", # 1AB0..1ACE ; Inherited "Zzzz", # 1ACF..1AFF ; Unknown "Bali", # 1B00..1B4C ; Balinese "Zzzz", # 1B4D..1B4F ; Unknown "Bali", # 1B50..1B7E ; Balinese "Zzzz", # 1B7F..1B7F ; Unknown "Sund", # 1B80..1BBF ; Sundanese "Batk", # 1BC0..1BF3 ; Batak "Zzzz", # 1BF4..1BFB ; Unknown "Batk", # 1BFC..1BFF ; Batak "Lepc", # 1C00..1C37 ; Lepcha "Zzzz", # 1C38..1C3A ; Unknown "Lepc", # 1C3B..1C49 ; Lepcha "Zzzz", # 1C4A..1C4C ; Unknown "Lepc", # 1C4D..1C4F ; Lepcha "Olck", # 1C50..1C7F ; Ol_Chiki "Cyrl", # 1C80..1C88 ; Cyrillic "Zzzz", # 1C89..1C8F ; Unknown "Geor", # 1C90..1CBA ; Georgian "Zzzz", # 1CBB..1CBC ; Unknown "Geor", # 1CBD..1CBF ; Georgian "Sund", # 1CC0..1CC7 ; Sundanese "Zzzz", # 1CC8..1CCF ; Unknown "Zinh", # 1CD0..1CD2 ; Inherited "Zyyy", # 1CD3..1CD3 ; Common "Zinh", # 1CD4..1CE0 ; Inherited "Zyyy", # 1CE1..1CE1 ; Common "Zinh", # 1CE2..1CE8 ; Inherited "Zyyy", # 1CE9..1CEC ; Common "Zinh", # 1CED..1CED ; Inherited "Zyyy", # 1CEE..1CF3 ; Common "Zinh", # 1CF4..1CF4 ; Inherited "Zyyy", # 1CF5..1CF7 ; Common "Zinh", # 1CF8..1CF9 ; Inherited "Zyyy", # 1CFA..1CFA ; Common "Zzzz", # 1CFB..1CFF ; Unknown "Latn", # 1D00..1D25 ; Latin "Grek", # 1D26..1D2A ; Greek "Cyrl", # 1D2B..1D2B ; Cyrillic "Latn", # 1D2C..1D5C ; Latin "Grek", # 1D5D..1D61 ; Greek "Latn", # 1D62..1D65 ; Latin "Grek", # 1D66..1D6A ; Greek "Latn", # 1D6B..1D77 ; Latin "Cyrl", # 1D78..1D78 ; Cyrillic "Latn", # 1D79..1DBE ; Latin "Grek", # 1DBF..1DBF ; Greek "Zinh", # 1DC0..1DFF ; Inherited "Latn", # 1E00..1EFF ; Latin "Grek", # 1F00..1F15 ; Greek "Zzzz", # 1F16..1F17 ; Unknown "Grek", # 1F18..1F1D ; Greek "Zzzz", # 1F1E..1F1F ; Unknown "Grek", # 1F20..1F45 ; Greek "Zzzz", # 1F46..1F47 ; Unknown "Grek", # 1F48..1F4D ; Greek "Zzzz", # 1F4E..1F4F ; Unknown "Grek", # 1F50..1F57 ; Greek "Zzzz", # 1F58..1F58 ; Unknown "Grek", # 1F59..1F59 ; Greek "Zzzz", # 1F5A..1F5A ; Unknown "Grek", # 1F5B..1F5B ; Greek "Zzzz", # 1F5C..1F5C ; Unknown "Grek", # 1F5D..1F5D ; Greek "Zzzz", # 1F5E..1F5E ; Unknown "Grek", # 1F5F..1F7D ; Greek "Zzzz", 
# 1F7E..1F7F ; Unknown "Grek", # 1F80..1FB4 ; Greek "Zzzz", # 1FB5..1FB5 ; Unknown "Grek", # 1FB6..1FC4 ; Greek "Zzzz", # 1FC5..1FC5 ; Unknown "Grek", # 1FC6..1FD3 ; Greek "Zzzz", # 1FD4..1FD5 ; Unknown "Grek", # 1FD6..1FDB ; Greek "Zzzz", # 1FDC..1FDC ; Unknown "Grek", # 1FDD..1FEF ; Greek "Zzzz", # 1FF0..1FF1 ; Unknown "Grek", # 1FF2..1FF4 ; Greek "Zzzz", # 1FF5..1FF5 ; Unknown "Grek", # 1FF6..1FFE ; Greek "Zzzz", # 1FFF..1FFF ; Unknown "Zyyy", # 2000..200B ; Common "Zinh", # 200C..200D ; Inherited "Zyyy", # 200E..2064 ; Common "Zzzz", # 2065..2065 ; Unknown "Zyyy", # 2066..2070 ; Common "Latn", # 2071..2071 ; Latin "Zzzz", # 2072..2073 ; Unknown "Zyyy", # 2074..207E ; Common "Latn", # 207F..207F ; Latin "Zyyy", # 2080..208E ; Common "Zzzz", # 208F..208F ; Unknown "Latn", # 2090..209C ; Latin "Zzzz", # 209D..209F ; Unknown "Zyyy", # 20A0..20C0 ; Common "Zzzz", # 20C1..20CF ; Unknown "Zinh", # 20D0..20F0 ; Inherited "Zzzz", # 20F1..20FF ; Unknown "Zyyy", # 2100..2125 ; Common "Grek", # 2126..2126 ; Greek "Zyyy", # 2127..2129 ; Common "Latn", # 212A..212B ; Latin "Zyyy", # 212C..2131 ; Common "Latn", # 2132..2132 ; Latin "Zyyy", # 2133..214D ; Common "Latn", # 214E..214E ; Latin "Zyyy", # 214F..215F ; Common "Latn", # 2160..2188 ; Latin "Zyyy", # 2189..218B ; Common "Zzzz", # 218C..218F ; Unknown "Zyyy", # 2190..2426 ; Common "Zzzz", # 2427..243F ; Unknown "Zyyy", # 2440..244A ; Common "Zzzz", # 244B..245F ; Unknown "Zyyy", # 2460..27FF ; Common "Brai", # 2800..28FF ; Braille "Zyyy", # 2900..2B73 ; Common "Zzzz", # 2B74..2B75 ; Unknown "Zyyy", # 2B76..2B95 ; Common "Zzzz", # 2B96..2B96 ; Unknown "Zyyy", # 2B97..2BFF ; Common "Glag", # 2C00..2C5F ; Glagolitic "Latn", # 2C60..2C7F ; Latin "Copt", # 2C80..2CF3 ; Coptic "Zzzz", # 2CF4..2CF8 ; Unknown "Copt", # 2CF9..2CFF ; Coptic "Geor", # 2D00..2D25 ; Georgian "Zzzz", # 2D26..2D26 ; Unknown "Geor", # 2D27..2D27 ; Georgian "Zzzz", # 2D28..2D2C ; Unknown "Geor", # 2D2D..2D2D ; Georgian "Zzzz", # 2D2E..2D2F ; Unknown "Tfng", # 2D30..2D67 ; Tifinagh "Zzzz", # 2D68..2D6E ; Unknown "Tfng", # 2D6F..2D70 ; Tifinagh "Zzzz", # 2D71..2D7E ; Unknown "Tfng", # 2D7F..2D7F ; Tifinagh "Ethi", # 2D80..2D96 ; Ethiopic "Zzzz", # 2D97..2D9F ; Unknown "Ethi", # 2DA0..2DA6 ; Ethiopic "Zzzz", # 2DA7..2DA7 ; Unknown "Ethi", # 2DA8..2DAE ; Ethiopic "Zzzz", # 2DAF..2DAF ; Unknown "Ethi", # 2DB0..2DB6 ; Ethiopic "Zzzz", # 2DB7..2DB7 ; Unknown "Ethi", # 2DB8..2DBE ; Ethiopic "Zzzz", # 2DBF..2DBF ; Unknown "Ethi", # 2DC0..2DC6 ; Ethiopic "Zzzz", # 2DC7..2DC7 ; Unknown "Ethi", # 2DC8..2DCE ; Ethiopic "Zzzz", # 2DCF..2DCF ; Unknown "Ethi", # 2DD0..2DD6 ; Ethiopic "Zzzz", # 2DD7..2DD7 ; Unknown "Ethi", # 2DD8..2DDE ; Ethiopic "Zzzz", # 2DDF..2DDF ; Unknown "Cyrl", # 2DE0..2DFF ; Cyrillic "Zyyy", # 2E00..2E5D ; Common "Zzzz", # 2E5E..2E7F ; Unknown "Hani", # 2E80..2E99 ; Han "Zzzz", # 2E9A..2E9A ; Unknown "Hani", # 2E9B..2EF3 ; Han "Zzzz", # 2EF4..2EFF ; Unknown "Hani", # 2F00..2FD5 ; Han "Zzzz", # 2FD6..2FEF ; Unknown "Zyyy", # 2FF0..2FFB ; Common "Zzzz", # 2FFC..2FFF ; Unknown "Zyyy", # 3000..3004 ; Common "Hani", # 3005..3005 ; Han "Zyyy", # 3006..3006 ; Common "Hani", # 3007..3007 ; Han "Zyyy", # 3008..3020 ; Common "Hani", # 3021..3029 ; Han "Zinh", # 302A..302D ; Inherited "Hang", # 302E..302F ; Hangul "Zyyy", # 3030..3037 ; Common "Hani", # 3038..303B ; Han "Zyyy", # 303C..303F ; Common "Zzzz", # 3040..3040 ; Unknown "Hira", # 3041..3096 ; Hiragana "Zzzz", # 3097..3098 ; Unknown "Zinh", # 3099..309A ; Inherited "Zyyy", # 309B..309C ; Common "Hira", # 309D..309F ; 
Hiragana "Zyyy", # 30A0..30A0 ; Common "Kana", # 30A1..30FA ; Katakana "Zyyy", # 30FB..30FC ; Common "Kana", # 30FD..30FF ; Katakana "Zzzz", # 3100..3104 ; Unknown "Bopo", # 3105..312F ; Bopomofo "Zzzz", # 3130..3130 ; Unknown "Hang", # 3131..318E ; Hangul "Zzzz", # 318F..318F ; Unknown "Zyyy", # 3190..319F ; Common "Bopo", # 31A0..31BF ; Bopomofo "Zyyy", # 31C0..31E3 ; Common "Zzzz", # 31E4..31EF ; Unknown "Kana", # 31F0..31FF ; Katakana "Hang", # 3200..321E ; Hangul "Zzzz", # 321F..321F ; Unknown "Zyyy", # 3220..325F ; Common "Hang", # 3260..327E ; Hangul "Zyyy", # 327F..32CF ; Common "Kana", # 32D0..32FE ; Katakana "Zyyy", # 32FF..32FF ; Common "Kana", # 3300..3357 ; Katakana "Zyyy", # 3358..33FF ; Common "Hani", # 3400..4DBF ; Han "Zyyy", # 4DC0..4DFF ; Common "Hani", # 4E00..9FFF ; Han "Yiii", # A000..A48C ; Yi "Zzzz", # A48D..A48F ; Unknown "Yiii", # A490..A4C6 ; Yi "Zzzz", # A4C7..A4CF ; Unknown "Lisu", # A4D0..A4FF ; Lisu "Vaii", # A500..A62B ; Vai "Zzzz", # A62C..A63F ; Unknown "Cyrl", # A640..A69F ; Cyrillic "Bamu", # A6A0..A6F7 ; Bamum "Zzzz", # A6F8..A6FF ; Unknown "Zyyy", # A700..A721 ; Common "Latn", # A722..A787 ; Latin "Zyyy", # A788..A78A ; Common "Latn", # A78B..A7CA ; Latin "Zzzz", # A7CB..A7CF ; Unknown "Latn", # A7D0..A7D1 ; Latin "Zzzz", # A7D2..A7D2 ; Unknown "Latn", # A7D3..A7D3 ; Latin "Zzzz", # A7D4..A7D4 ; Unknown "Latn", # A7D5..A7D9 ; Latin "Zzzz", # A7DA..A7F1 ; Unknown "Latn", # A7F2..A7FF ; Latin "Sylo", # A800..A82C ; Syloti_Nagri "Zzzz", # A82D..A82F ; Unknown "Zyyy", # A830..A839 ; Common "Zzzz", # A83A..A83F ; Unknown "Phag", # A840..A877 ; Phags_Pa "Zzzz", # A878..A87F ; Unknown "Saur", # A880..A8C5 ; Saurashtra "Zzzz", # A8C6..A8CD ; Unknown "Saur", # A8CE..A8D9 ; Saurashtra "Zzzz", # A8DA..A8DF ; Unknown "Deva", # A8E0..A8FF ; Devanagari "Kali", # A900..A92D ; Kayah_Li "Zyyy", # A92E..A92E ; Common "Kali", # A92F..A92F ; Kayah_Li "Rjng", # A930..A953 ; Rejang "Zzzz", # A954..A95E ; Unknown "Rjng", # A95F..A95F ; Rejang "Hang", # A960..A97C ; Hangul "Zzzz", # A97D..A97F ; Unknown "Java", # A980..A9CD ; Javanese "Zzzz", # A9CE..A9CE ; Unknown "Zyyy", # A9CF..A9CF ; Common "Java", # A9D0..A9D9 ; Javanese "Zzzz", # A9DA..A9DD ; Unknown "Java", # A9DE..A9DF ; Javanese "Mymr", # A9E0..A9FE ; Myanmar "Zzzz", # A9FF..A9FF ; Unknown "Cham", # AA00..AA36 ; Cham "Zzzz", # AA37..AA3F ; Unknown "Cham", # AA40..AA4D ; Cham "Zzzz", # AA4E..AA4F ; Unknown "Cham", # AA50..AA59 ; Cham "Zzzz", # AA5A..AA5B ; Unknown "Cham", # AA5C..AA5F ; Cham "Mymr", # AA60..AA7F ; Myanmar "Tavt", # AA80..AAC2 ; Tai_Viet "Zzzz", # AAC3..AADA ; Unknown "Tavt", # AADB..AADF ; Tai_Viet "Mtei", # AAE0..AAF6 ; Meetei_Mayek "Zzzz", # AAF7..AB00 ; Unknown "Ethi", # AB01..AB06 ; Ethiopic "Zzzz", # AB07..AB08 ; Unknown "Ethi", # AB09..AB0E ; Ethiopic "Zzzz", # AB0F..AB10 ; Unknown "Ethi", # AB11..AB16 ; Ethiopic "Zzzz", # AB17..AB1F ; Unknown "Ethi", # AB20..AB26 ; Ethiopic "Zzzz", # AB27..AB27 ; Unknown "Ethi", # AB28..AB2E ; Ethiopic "Zzzz", # AB2F..AB2F ; Unknown "Latn", # AB30..AB5A ; Latin "Zyyy", # AB5B..AB5B ; Common "Latn", # AB5C..AB64 ; Latin "Grek", # AB65..AB65 ; Greek "Latn", # AB66..AB69 ; Latin "Zyyy", # AB6A..AB6B ; Common "Zzzz", # AB6C..AB6F ; Unknown "Cher", # AB70..ABBF ; Cherokee "Mtei", # ABC0..ABED ; Meetei_Mayek "Zzzz", # ABEE..ABEF ; Unknown "Mtei", # ABF0..ABF9 ; Meetei_Mayek "Zzzz", # ABFA..ABFF ; Unknown "Hang", # AC00..D7A3 ; Hangul "Zzzz", # D7A4..D7AF ; Unknown "Hang", # D7B0..D7C6 ; Hangul "Zzzz", # D7C7..D7CA ; Unknown "Hang", # D7CB..D7FB ; Hangul "Zzzz", # 
D7FC..F8FF ; Unknown "Hani", # F900..FA6D ; Han "Zzzz", # FA6E..FA6F ; Unknown "Hani", # FA70..FAD9 ; Han "Zzzz", # FADA..FAFF ; Unknown "Latn", # FB00..FB06 ; Latin "Zzzz", # FB07..FB12 ; Unknown "Armn", # FB13..FB17 ; Armenian "Zzzz", # FB18..FB1C ; Unknown "Hebr", # FB1D..FB36 ; Hebrew "Zzzz", # FB37..FB37 ; Unknown "Hebr", # FB38..FB3C ; Hebrew "Zzzz", # FB3D..FB3D ; Unknown "Hebr", # FB3E..FB3E ; Hebrew "Zzzz", # FB3F..FB3F ; Unknown "Hebr", # FB40..FB41 ; Hebrew "Zzzz", # FB42..FB42 ; Unknown "Hebr", # FB43..FB44 ; Hebrew "Zzzz", # FB45..FB45 ; Unknown "Hebr", # FB46..FB4F ; Hebrew "Arab", # FB50..FBC2 ; Arabic "Zzzz", # FBC3..FBD2 ; Unknown "Arab", # FBD3..FD3D ; Arabic "Zyyy", # FD3E..FD3F ; Common "Arab", # FD40..FD8F ; Arabic "Zzzz", # FD90..FD91 ; Unknown "Arab", # FD92..FDC7 ; Arabic "Zzzz", # FDC8..FDCE ; Unknown "Arab", # FDCF..FDCF ; Arabic "Zzzz", # FDD0..FDEF ; Unknown "Arab", # FDF0..FDFF ; Arabic "Zinh", # FE00..FE0F ; Inherited "Zyyy", # FE10..FE19 ; Common "Zzzz", # FE1A..FE1F ; Unknown "Zinh", # FE20..FE2D ; Inherited "Cyrl", # FE2E..FE2F ; Cyrillic "Zyyy", # FE30..FE52 ; Common "Zzzz", # FE53..FE53 ; Unknown "Zyyy", # FE54..FE66 ; Common "Zzzz", # FE67..FE67 ; Unknown "Zyyy", # FE68..FE6B ; Common "Zzzz", # FE6C..FE6F ; Unknown "Arab", # FE70..FE74 ; Arabic "Zzzz", # FE75..FE75 ; Unknown "Arab", # FE76..FEFC ; Arabic "Zzzz", # FEFD..FEFE ; Unknown "Zyyy", # FEFF..FEFF ; Common "Zzzz", # FF00..FF00 ; Unknown "Zyyy", # FF01..FF20 ; Common "Latn", # FF21..FF3A ; Latin "Zyyy", # FF3B..FF40 ; Common "Latn", # FF41..FF5A ; Latin "Zyyy", # FF5B..FF65 ; Common "Kana", # FF66..FF6F ; Katakana "Zyyy", # FF70..FF70 ; Common "Kana", # FF71..FF9D ; Katakana "Zyyy", # FF9E..FF9F ; Common "Hang", # FFA0..FFBE ; Hangul "Zzzz", # FFBF..FFC1 ; Unknown "Hang", # FFC2..FFC7 ; Hangul "Zzzz", # FFC8..FFC9 ; Unknown "Hang", # FFCA..FFCF ; Hangul "Zzzz", # FFD0..FFD1 ; Unknown "Hang", # FFD2..FFD7 ; Hangul "Zzzz", # FFD8..FFD9 ; Unknown "Hang", # FFDA..FFDC ; Hangul "Zzzz", # FFDD..FFDF ; Unknown "Zyyy", # FFE0..FFE6 ; Common "Zzzz", # FFE7..FFE7 ; Unknown "Zyyy", # FFE8..FFEE ; Common "Zzzz", # FFEF..FFF8 ; Unknown "Zyyy", # FFF9..FFFD ; Common "Zzzz", # FFFE..FFFF ; Unknown "Linb", # 10000..1000B ; Linear_B "Zzzz", # 1000C..1000C ; Unknown "Linb", # 1000D..10026 ; Linear_B "Zzzz", # 10027..10027 ; Unknown "Linb", # 10028..1003A ; Linear_B "Zzzz", # 1003B..1003B ; Unknown "Linb", # 1003C..1003D ; Linear_B "Zzzz", # 1003E..1003E ; Unknown "Linb", # 1003F..1004D ; Linear_B "Zzzz", # 1004E..1004F ; Unknown "Linb", # 10050..1005D ; Linear_B "Zzzz", # 1005E..1007F ; Unknown "Linb", # 10080..100FA ; Linear_B "Zzzz", # 100FB..100FF ; Unknown "Zyyy", # 10100..10102 ; Common "Zzzz", # 10103..10106 ; Unknown "Zyyy", # 10107..10133 ; Common "Zzzz", # 10134..10136 ; Unknown "Zyyy", # 10137..1013F ; Common "Grek", # 10140..1018E ; Greek "Zzzz", # 1018F..1018F ; Unknown "Zyyy", # 10190..1019C ; Common "Zzzz", # 1019D..1019F ; Unknown "Grek", # 101A0..101A0 ; Greek "Zzzz", # 101A1..101CF ; Unknown "Zyyy", # 101D0..101FC ; Common "Zinh", # 101FD..101FD ; Inherited "Zzzz", # 101FE..1027F ; Unknown "Lyci", # 10280..1029C ; Lycian "Zzzz", # 1029D..1029F ; Unknown "Cari", # 102A0..102D0 ; Carian "Zzzz", # 102D1..102DF ; Unknown "Zinh", # 102E0..102E0 ; Inherited "Zyyy", # 102E1..102FB ; Common "Zzzz", # 102FC..102FF ; Unknown "Ital", # 10300..10323 ; Old_Italic "Zzzz", # 10324..1032C ; Unknown "Ital", # 1032D..1032F ; Old_Italic "Goth", # 10330..1034A ; Gothic "Zzzz", # 1034B..1034F ; Unknown "Perm", # 
10350..1037A ; Old_Permic "Zzzz", # 1037B..1037F ; Unknown "Ugar", # 10380..1039D ; Ugaritic "Zzzz", # 1039E..1039E ; Unknown "Ugar", # 1039F..1039F ; Ugaritic "Xpeo", # 103A0..103C3 ; Old_Persian "Zzzz", # 103C4..103C7 ; Unknown "Xpeo", # 103C8..103D5 ; Old_Persian "Zzzz", # 103D6..103FF ; Unknown "Dsrt", # 10400..1044F ; Deseret "Shaw", # 10450..1047F ; Shavian "Osma", # 10480..1049D ; Osmanya "Zzzz", # 1049E..1049F ; Unknown "Osma", # 104A0..104A9 ; Osmanya "Zzzz", # 104AA..104AF ; Unknown "Osge", # 104B0..104D3 ; Osage "Zzzz", # 104D4..104D7 ; Unknown "Osge", # 104D8..104FB ; Osage "Zzzz", # 104FC..104FF ; Unknown "Elba", # 10500..10527 ; Elbasan "Zzzz", # 10528..1052F ; Unknown "Aghb", # 10530..10563 ; Caucasian_Albanian "Zzzz", # 10564..1056E ; Unknown "Aghb", # 1056F..1056F ; Caucasian_Albanian "Vith", # 10570..1057A ; Vithkuqi "Zzzz", # 1057B..1057B ; Unknown "Vith", # 1057C..1058A ; Vithkuqi "Zzzz", # 1058B..1058B ; Unknown "Vith", # 1058C..10592 ; Vithkuqi "Zzzz", # 10593..10593 ; Unknown "Vith", # 10594..10595 ; Vithkuqi "Zzzz", # 10596..10596 ; Unknown "Vith", # 10597..105A1 ; Vithkuqi "Zzzz", # 105A2..105A2 ; Unknown "Vith", # 105A3..105B1 ; Vithkuqi "Zzzz", # 105B2..105B2 ; Unknown "Vith", # 105B3..105B9 ; Vithkuqi "Zzzz", # 105BA..105BA ; Unknown "Vith", # 105BB..105BC ; Vithkuqi "Zzzz", # 105BD..105FF ; Unknown "Lina", # 10600..10736 ; Linear_A "Zzzz", # 10737..1073F ; Unknown "Lina", # 10740..10755 ; Linear_A "Zzzz", # 10756..1075F ; Unknown "Lina", # 10760..10767 ; Linear_A "Zzzz", # 10768..1077F ; Unknown "Latn", # 10780..10785 ; Latin "Zzzz", # 10786..10786 ; Unknown "Latn", # 10787..107B0 ; Latin "Zzzz", # 107B1..107B1 ; Unknown "Latn", # 107B2..107BA ; Latin "Zzzz", # 107BB..107FF ; Unknown "Cprt", # 10800..10805 ; Cypriot "Zzzz", # 10806..10807 ; Unknown "Cprt", # 10808..10808 ; Cypriot "Zzzz", # 10809..10809 ; Unknown "Cprt", # 1080A..10835 ; Cypriot "Zzzz", # 10836..10836 ; Unknown "Cprt", # 10837..10838 ; Cypriot "Zzzz", # 10839..1083B ; Unknown "Cprt", # 1083C..1083C ; Cypriot "Zzzz", # 1083D..1083E ; Unknown "Cprt", # 1083F..1083F ; Cypriot "Armi", # 10840..10855 ; Imperial_Aramaic "Zzzz", # 10856..10856 ; Unknown "Armi", # 10857..1085F ; Imperial_Aramaic "Palm", # 10860..1087F ; Palmyrene "Nbat", # 10880..1089E ; Nabataean "Zzzz", # 1089F..108A6 ; Unknown "Nbat", # 108A7..108AF ; Nabataean "Zzzz", # 108B0..108DF ; Unknown "Hatr", # 108E0..108F2 ; Hatran "Zzzz", # 108F3..108F3 ; Unknown "Hatr", # 108F4..108F5 ; Hatran "Zzzz", # 108F6..108FA ; Unknown "Hatr", # 108FB..108FF ; Hatran "Phnx", # 10900..1091B ; Phoenician "Zzzz", # 1091C..1091E ; Unknown "Phnx", # 1091F..1091F ; Phoenician "Lydi", # 10920..10939 ; Lydian "Zzzz", # 1093A..1093E ; Unknown "Lydi", # 1093F..1093F ; Lydian "Zzzz", # 10940..1097F ; Unknown "Mero", # 10980..1099F ; Meroitic_Hieroglyphs "Merc", # 109A0..109B7 ; Meroitic_Cursive "Zzzz", # 109B8..109BB ; Unknown "Merc", # 109BC..109CF ; Meroitic_Cursive "Zzzz", # 109D0..109D1 ; Unknown "Merc", # 109D2..109FF ; Meroitic_Cursive "Khar", # 10A00..10A03 ; Kharoshthi "Zzzz", # 10A04..10A04 ; Unknown "Khar", # 10A05..10A06 ; Kharoshthi "Zzzz", # 10A07..10A0B ; Unknown "Khar", # 10A0C..10A13 ; Kharoshthi "Zzzz", # 10A14..10A14 ; Unknown "Khar", # 10A15..10A17 ; Kharoshthi "Zzzz", # 10A18..10A18 ; Unknown "Khar", # 10A19..10A35 ; Kharoshthi "Zzzz", # 10A36..10A37 ; Unknown "Khar", # 10A38..10A3A ; Kharoshthi "Zzzz", # 10A3B..10A3E ; Unknown "Khar", # 10A3F..10A48 ; Kharoshthi "Zzzz", # 10A49..10A4F ; Unknown "Khar", # 10A50..10A58 ; Kharoshthi "Zzzz", 
# 10A59..10A5F ; Unknown "Sarb", # 10A60..10A7F ; Old_South_Arabian "Narb", # 10A80..10A9F ; Old_North_Arabian "Zzzz", # 10AA0..10ABF ; Unknown "Mani", # 10AC0..10AE6 ; Manichaean "Zzzz", # 10AE7..10AEA ; Unknown "Mani", # 10AEB..10AF6 ; Manichaean "Zzzz", # 10AF7..10AFF ; Unknown "Avst", # 10B00..10B35 ; Avestan "Zzzz", # 10B36..10B38 ; Unknown "Avst", # 10B39..10B3F ; Avestan "Prti", # 10B40..10B55 ; Inscriptional_Parthian "Zzzz", # 10B56..10B57 ; Unknown "Prti", # 10B58..10B5F ; Inscriptional_Parthian "Phli", # 10B60..10B72 ; Inscriptional_Pahlavi "Zzzz", # 10B73..10B77 ; Unknown "Phli", # 10B78..10B7F ; Inscriptional_Pahlavi "Phlp", # 10B80..10B91 ; Psalter_Pahlavi "Zzzz", # 10B92..10B98 ; Unknown "Phlp", # 10B99..10B9C ; Psalter_Pahlavi "Zzzz", # 10B9D..10BA8 ; Unknown "Phlp", # 10BA9..10BAF ; Psalter_Pahlavi "Zzzz", # 10BB0..10BFF ; Unknown "Orkh", # 10C00..10C48 ; Old_Turkic "Zzzz", # 10C49..10C7F ; Unknown "Hung", # 10C80..10CB2 ; Old_Hungarian "Zzzz", # 10CB3..10CBF ; Unknown "Hung", # 10CC0..10CF2 ; Old_Hungarian "Zzzz", # 10CF3..10CF9 ; Unknown "Hung", # 10CFA..10CFF ; Old_Hungarian "Rohg", # 10D00..10D27 ; Hanifi_Rohingya "Zzzz", # 10D28..10D2F ; Unknown "Rohg", # 10D30..10D39 ; Hanifi_Rohingya "Zzzz", # 10D3A..10E5F ; Unknown "Arab", # 10E60..10E7E ; Arabic "Zzzz", # 10E7F..10E7F ; Unknown "Yezi", # 10E80..10EA9 ; Yezidi "Zzzz", # 10EAA..10EAA ; Unknown "Yezi", # 10EAB..10EAD ; Yezidi "Zzzz", # 10EAE..10EAF ; Unknown "Yezi", # 10EB0..10EB1 ; Yezidi "Zzzz", # 10EB2..10EFC ; Unknown "Arab", # 10EFD..10EFF ; Arabic "Sogo", # 10F00..10F27 ; Old_Sogdian "Zzzz", # 10F28..10F2F ; Unknown "Sogd", # 10F30..10F59 ; Sogdian "Zzzz", # 10F5A..10F6F ; Unknown "Ougr", # 10F70..10F89 ; Old_Uyghur "Zzzz", # 10F8A..10FAF ; Unknown "Chrs", # 10FB0..10FCB ; Chorasmian "Zzzz", # 10FCC..10FDF ; Unknown "Elym", # 10FE0..10FF6 ; Elymaic "Zzzz", # 10FF7..10FFF ; Unknown "Brah", # 11000..1104D ; Brahmi "Zzzz", # 1104E..11051 ; Unknown "Brah", # 11052..11075 ; Brahmi "Zzzz", # 11076..1107E ; Unknown "Brah", # 1107F..1107F ; Brahmi "Kthi", # 11080..110C2 ; Kaithi "Zzzz", # 110C3..110CC ; Unknown "Kthi", # 110CD..110CD ; Kaithi "Zzzz", # 110CE..110CF ; Unknown "Sora", # 110D0..110E8 ; Sora_Sompeng "Zzzz", # 110E9..110EF ; Unknown "Sora", # 110F0..110F9 ; Sora_Sompeng "Zzzz", # 110FA..110FF ; Unknown "Cakm", # 11100..11134 ; Chakma "Zzzz", # 11135..11135 ; Unknown "Cakm", # 11136..11147 ; Chakma "Zzzz", # 11148..1114F ; Unknown "Mahj", # 11150..11176 ; Mahajani "Zzzz", # 11177..1117F ; Unknown "Shrd", # 11180..111DF ; Sharada "Zzzz", # 111E0..111E0 ; Unknown "Sinh", # 111E1..111F4 ; Sinhala "Zzzz", # 111F5..111FF ; Unknown "Khoj", # 11200..11211 ; Khojki "Zzzz", # 11212..11212 ; Unknown "Khoj", # 11213..11241 ; Khojki "Zzzz", # 11242..1127F ; Unknown "Mult", # 11280..11286 ; Multani "Zzzz", # 11287..11287 ; Unknown "Mult", # 11288..11288 ; Multani "Zzzz", # 11289..11289 ; Unknown "Mult", # 1128A..1128D ; Multani "Zzzz", # 1128E..1128E ; Unknown "Mult", # 1128F..1129D ; Multani "Zzzz", # 1129E..1129E ; Unknown "Mult", # 1129F..112A9 ; Multani "Zzzz", # 112AA..112AF ; Unknown "Sind", # 112B0..112EA ; Khudawadi "Zzzz", # 112EB..112EF ; Unknown "Sind", # 112F0..112F9 ; Khudawadi "Zzzz", # 112FA..112FF ; Unknown "Gran", # 11300..11303 ; Grantha "Zzzz", # 11304..11304 ; Unknown "Gran", # 11305..1130C ; Grantha "Zzzz", # 1130D..1130E ; Unknown "Gran", # 1130F..11310 ; Grantha "Zzzz", # 11311..11312 ; Unknown "Gran", # 11313..11328 ; Grantha "Zzzz", # 11329..11329 ; Unknown "Gran", # 1132A..11330 ; Grantha "Zzzz", 
# 11331..11331 ; Unknown "Gran", # 11332..11333 ; Grantha "Zzzz", # 11334..11334 ; Unknown "Gran", # 11335..11339 ; Grantha "Zzzz", # 1133A..1133A ; Unknown "Zinh", # 1133B..1133B ; Inherited "Gran", # 1133C..11344 ; Grantha "Zzzz", # 11345..11346 ; Unknown "Gran", # 11347..11348 ; Grantha "Zzzz", # 11349..1134A ; Unknown "Gran", # 1134B..1134D ; Grantha "Zzzz", # 1134E..1134F ; Unknown "Gran", # 11350..11350 ; Grantha "Zzzz", # 11351..11356 ; Unknown "Gran", # 11357..11357 ; Grantha "Zzzz", # 11358..1135C ; Unknown "Gran", # 1135D..11363 ; Grantha "Zzzz", # 11364..11365 ; Unknown "Gran", # 11366..1136C ; Grantha "Zzzz", # 1136D..1136F ; Unknown "Gran", # 11370..11374 ; Grantha "Zzzz", # 11375..113FF ; Unknown "Newa", # 11400..1145B ; Newa "Zzzz", # 1145C..1145C ; Unknown "Newa", # 1145D..11461 ; Newa "Zzzz", # 11462..1147F ; Unknown "Tirh", # 11480..114C7 ; Tirhuta "Zzzz", # 114C8..114CF ; Unknown "Tirh", # 114D0..114D9 ; Tirhuta "Zzzz", # 114DA..1157F ; Unknown "Sidd", # 11580..115B5 ; Siddham "Zzzz", # 115B6..115B7 ; Unknown "Sidd", # 115B8..115DD ; Siddham "Zzzz", # 115DE..115FF ; Unknown "Modi", # 11600..11644 ; Modi "Zzzz", # 11645..1164F ; Unknown "Modi", # 11650..11659 ; Modi "Zzzz", # 1165A..1165F ; Unknown "Mong", # 11660..1166C ; Mongolian "Zzzz", # 1166D..1167F ; Unknown "Takr", # 11680..116B9 ; Takri "Zzzz", # 116BA..116BF ; Unknown "Takr", # 116C0..116C9 ; Takri "Zzzz", # 116CA..116FF ; Unknown "Ahom", # 11700..1171A ; Ahom "Zzzz", # 1171B..1171C ; Unknown "Ahom", # 1171D..1172B ; Ahom "Zzzz", # 1172C..1172F ; Unknown "Ahom", # 11730..11746 ; Ahom "Zzzz", # 11747..117FF ; Unknown "Dogr", # 11800..1183B ; Dogra "Zzzz", # 1183C..1189F ; Unknown "Wara", # 118A0..118F2 ; Warang_Citi "Zzzz", # 118F3..118FE ; Unknown "Wara", # 118FF..118FF ; Warang_Citi "Diak", # 11900..11906 ; Dives_Akuru "Zzzz", # 11907..11908 ; Unknown "Diak", # 11909..11909 ; Dives_Akuru "Zzzz", # 1190A..1190B ; Unknown "Diak", # 1190C..11913 ; Dives_Akuru "Zzzz", # 11914..11914 ; Unknown "Diak", # 11915..11916 ; Dives_Akuru "Zzzz", # 11917..11917 ; Unknown "Diak", # 11918..11935 ; Dives_Akuru "Zzzz", # 11936..11936 ; Unknown "Diak", # 11937..11938 ; Dives_Akuru "Zzzz", # 11939..1193A ; Unknown "Diak", # 1193B..11946 ; Dives_Akuru "Zzzz", # 11947..1194F ; Unknown "Diak", # 11950..11959 ; Dives_Akuru "Zzzz", # 1195A..1199F ; Unknown "Nand", # 119A0..119A7 ; Nandinagari "Zzzz", # 119A8..119A9 ; Unknown "Nand", # 119AA..119D7 ; Nandinagari "Zzzz", # 119D8..119D9 ; Unknown "Nand", # 119DA..119E4 ; Nandinagari "Zzzz", # 119E5..119FF ; Unknown "Zanb", # 11A00..11A47 ; Zanabazar_Square "Zzzz", # 11A48..11A4F ; Unknown "Soyo", # 11A50..11AA2 ; Soyombo "Zzzz", # 11AA3..11AAF ; Unknown "Cans", # 11AB0..11ABF ; Canadian_Aboriginal "Pauc", # 11AC0..11AF8 ; Pau_Cin_Hau "Zzzz", # 11AF9..11AFF ; Unknown "Deva", # 11B00..11B09 ; Devanagari "Zzzz", # 11B0A..11BFF ; Unknown "Bhks", # 11C00..11C08 ; Bhaiksuki "Zzzz", # 11C09..11C09 ; Unknown "Bhks", # 11C0A..11C36 ; Bhaiksuki "Zzzz", # 11C37..11C37 ; Unknown "Bhks", # 11C38..11C45 ; Bhaiksuki "Zzzz", # 11C46..11C4F ; Unknown "Bhks", # 11C50..11C6C ; Bhaiksuki "Zzzz", # 11C6D..11C6F ; Unknown "Marc", # 11C70..11C8F ; Marchen "Zzzz", # 11C90..11C91 ; Unknown "Marc", # 11C92..11CA7 ; Marchen "Zzzz", # 11CA8..11CA8 ; Unknown "Marc", # 11CA9..11CB6 ; Marchen "Zzzz", # 11CB7..11CFF ; Unknown "Gonm", # 11D00..11D06 ; Masaram_Gondi "Zzzz", # 11D07..11D07 ; Unknown "Gonm", # 11D08..11D09 ; Masaram_Gondi "Zzzz", # 11D0A..11D0A ; Unknown "Gonm", # 11D0B..11D36 ; Masaram_Gondi "Zzzz", # 
11D37..11D39 ; Unknown "Gonm", # 11D3A..11D3A ; Masaram_Gondi "Zzzz", # 11D3B..11D3B ; Unknown "Gonm", # 11D3C..11D3D ; Masaram_Gondi "Zzzz", # 11D3E..11D3E ; Unknown "Gonm", # 11D3F..11D47 ; Masaram_Gondi "Zzzz", # 11D48..11D4F ; Unknown "Gonm", # 11D50..11D59 ; Masaram_Gondi "Zzzz", # 11D5A..11D5F ; Unknown "Gong", # 11D60..11D65 ; Gunjala_Gondi "Zzzz", # 11D66..11D66 ; Unknown "Gong", # 11D67..11D68 ; Gunjala_Gondi "Zzzz", # 11D69..11D69 ; Unknown "Gong", # 11D6A..11D8E ; Gunjala_Gondi "Zzzz", # 11D8F..11D8F ; Unknown "Gong", # 11D90..11D91 ; Gunjala_Gondi "Zzzz", # 11D92..11D92 ; Unknown "Gong", # 11D93..11D98 ; Gunjala_Gondi "Zzzz", # 11D99..11D9F ; Unknown "Gong", # 11DA0..11DA9 ; Gunjala_Gondi "Zzzz", # 11DAA..11EDF ; Unknown "Maka", # 11EE0..11EF8 ; Makasar "Zzzz", # 11EF9..11EFF ; Unknown "Kawi", # 11F00..11F10 ; Kawi "Zzzz", # 11F11..11F11 ; Unknown "Kawi", # 11F12..11F3A ; Kawi "Zzzz", # 11F3B..11F3D ; Unknown "Kawi", # 11F3E..11F59 ; Kawi "Zzzz", # 11F5A..11FAF ; Unknown "Lisu", # 11FB0..11FB0 ; Lisu "Zzzz", # 11FB1..11FBF ; Unknown "Taml", # 11FC0..11FF1 ; Tamil "Zzzz", # 11FF2..11FFE ; Unknown "Taml", # 11FFF..11FFF ; Tamil "Xsux", # 12000..12399 ; Cuneiform "Zzzz", # 1239A..123FF ; Unknown "Xsux", # 12400..1246E ; Cuneiform "Zzzz", # 1246F..1246F ; Unknown "Xsux", # 12470..12474 ; Cuneiform "Zzzz", # 12475..1247F ; Unknown "Xsux", # 12480..12543 ; Cuneiform "Zzzz", # 12544..12F8F ; Unknown "Cpmn", # 12F90..12FF2 ; Cypro_Minoan "Zzzz", # 12FF3..12FFF ; Unknown "Egyp", # 13000..13455 ; Egyptian_Hieroglyphs "Zzzz", # 13456..143FF ; Unknown "Hluw", # 14400..14646 ; Anatolian_Hieroglyphs "Zzzz", # 14647..167FF ; Unknown "Bamu", # 16800..16A38 ; Bamum "Zzzz", # 16A39..16A3F ; Unknown "Mroo", # 16A40..16A5E ; Mro "Zzzz", # 16A5F..16A5F ; Unknown "Mroo", # 16A60..16A69 ; Mro "Zzzz", # 16A6A..16A6D ; Unknown "Mroo", # 16A6E..16A6F ; Mro "Tnsa", # 16A70..16ABE ; Tangsa "Zzzz", # 16ABF..16ABF ; Unknown "Tnsa", # 16AC0..16AC9 ; Tangsa "Zzzz", # 16ACA..16ACF ; Unknown "Bass", # 16AD0..16AED ; Bassa_Vah "Zzzz", # 16AEE..16AEF ; Unknown "Bass", # 16AF0..16AF5 ; Bassa_Vah "Zzzz", # 16AF6..16AFF ; Unknown "Hmng", # 16B00..16B45 ; Pahawh_Hmong "Zzzz", # 16B46..16B4F ; Unknown "Hmng", # 16B50..16B59 ; Pahawh_Hmong "Zzzz", # 16B5A..16B5A ; Unknown "Hmng", # 16B5B..16B61 ; Pahawh_Hmong "Zzzz", # 16B62..16B62 ; Unknown "Hmng", # 16B63..16B77 ; Pahawh_Hmong "Zzzz", # 16B78..16B7C ; Unknown "Hmng", # 16B7D..16B8F ; Pahawh_Hmong "Zzzz", # 16B90..16E3F ; Unknown "Medf", # 16E40..16E9A ; Medefaidrin "Zzzz", # 16E9B..16EFF ; Unknown "Plrd", # 16F00..16F4A ; Miao "Zzzz", # 16F4B..16F4E ; Unknown "Plrd", # 16F4F..16F87 ; Miao "Zzzz", # 16F88..16F8E ; Unknown "Plrd", # 16F8F..16F9F ; Miao "Zzzz", # 16FA0..16FDF ; Unknown "Tang", # 16FE0..16FE0 ; Tangut "Nshu", # 16FE1..16FE1 ; Nushu "Hani", # 16FE2..16FE3 ; Han "Kits", # 16FE4..16FE4 ; Khitan_Small_Script "Zzzz", # 16FE5..16FEF ; Unknown "Hani", # 16FF0..16FF1 ; Han "Zzzz", # 16FF2..16FFF ; Unknown "Tang", # 17000..187F7 ; Tangut "Zzzz", # 187F8..187FF ; Unknown "Tang", # 18800..18AFF ; Tangut "Kits", # 18B00..18CD5 ; Khitan_Small_Script "Zzzz", # 18CD6..18CFF ; Unknown "Tang", # 18D00..18D08 ; Tangut "Zzzz", # 18D09..1AFEF ; Unknown "Kana", # 1AFF0..1AFF3 ; Katakana "Zzzz", # 1AFF4..1AFF4 ; Unknown "Kana", # 1AFF5..1AFFB ; Katakana "Zzzz", # 1AFFC..1AFFC ; Unknown "Kana", # 1AFFD..1AFFE ; Katakana "Zzzz", # 1AFFF..1AFFF ; Unknown "Kana", # 1B000..1B000 ; Katakana "Hira", # 1B001..1B11F ; Hiragana "Kana", # 1B120..1B122 ; Katakana "Zzzz", # 1B123..1B131 ; 
Unknown "Hira", # 1B132..1B132 ; Hiragana "Zzzz", # 1B133..1B14F ; Unknown "Hira", # 1B150..1B152 ; Hiragana "Zzzz", # 1B153..1B154 ; Unknown "Kana", # 1B155..1B155 ; Katakana "Zzzz", # 1B156..1B163 ; Unknown "Kana", # 1B164..1B167 ; Katakana "Zzzz", # 1B168..1B16F ; Unknown "Nshu", # 1B170..1B2FB ; Nushu "Zzzz", # 1B2FC..1BBFF ; Unknown "Dupl", # 1BC00..1BC6A ; Duployan "Zzzz", # 1BC6B..1BC6F ; Unknown "Dupl", # 1BC70..1BC7C ; Duployan "Zzzz", # 1BC7D..1BC7F ; Unknown "Dupl", # 1BC80..1BC88 ; Duployan "Zzzz", # 1BC89..1BC8F ; Unknown "Dupl", # 1BC90..1BC99 ; Duployan "Zzzz", # 1BC9A..1BC9B ; Unknown "Dupl", # 1BC9C..1BC9F ; Duployan "Zyyy", # 1BCA0..1BCA3 ; Common "Zzzz", # 1BCA4..1CEFF ; Unknown "Zinh", # 1CF00..1CF2D ; Inherited "Zzzz", # 1CF2E..1CF2F ; Unknown "Zinh", # 1CF30..1CF46 ; Inherited "Zzzz", # 1CF47..1CF4F ; Unknown "Zyyy", # 1CF50..1CFC3 ; Common "Zzzz", # 1CFC4..1CFFF ; Unknown "Zyyy", # 1D000..1D0F5 ; Common "Zzzz", # 1D0F6..1D0FF ; Unknown "Zyyy", # 1D100..1D126 ; Common "Zzzz", # 1D127..1D128 ; Unknown "Zyyy", # 1D129..1D166 ; Common "Zinh", # 1D167..1D169 ; Inherited "Zyyy", # 1D16A..1D17A ; Common "Zinh", # 1D17B..1D182 ; Inherited "Zyyy", # 1D183..1D184 ; Common "Zinh", # 1D185..1D18B ; Inherited "Zyyy", # 1D18C..1D1A9 ; Common "Zinh", # 1D1AA..1D1AD ; Inherited "Zyyy", # 1D1AE..1D1EA ; Common "Zzzz", # 1D1EB..1D1FF ; Unknown "Grek", # 1D200..1D245 ; Greek "Zzzz", # 1D246..1D2BF ; Unknown "Zyyy", # 1D2C0..1D2D3 ; Common "Zzzz", # 1D2D4..1D2DF ; Unknown "Zyyy", # 1D2E0..1D2F3 ; Common "Zzzz", # 1D2F4..1D2FF ; Unknown "Zyyy", # 1D300..1D356 ; Common "Zzzz", # 1D357..1D35F ; Unknown "Zyyy", # 1D360..1D378 ; Common "Zzzz", # 1D379..1D3FF ; Unknown "Zyyy", # 1D400..1D454 ; Common "Zzzz", # 1D455..1D455 ; Unknown "Zyyy", # 1D456..1D49C ; Common "Zzzz", # 1D49D..1D49D ; Unknown "Zyyy", # 1D49E..1D49F ; Common "Zzzz", # 1D4A0..1D4A1 ; Unknown "Zyyy", # 1D4A2..1D4A2 ; Common "Zzzz", # 1D4A3..1D4A4 ; Unknown "Zyyy", # 1D4A5..1D4A6 ; Common "Zzzz", # 1D4A7..1D4A8 ; Unknown "Zyyy", # 1D4A9..1D4AC ; Common "Zzzz", # 1D4AD..1D4AD ; Unknown "Zyyy", # 1D4AE..1D4B9 ; Common "Zzzz", # 1D4BA..1D4BA ; Unknown "Zyyy", # 1D4BB..1D4BB ; Common "Zzzz", # 1D4BC..1D4BC ; Unknown "Zyyy", # 1D4BD..1D4C3 ; Common "Zzzz", # 1D4C4..1D4C4 ; Unknown "Zyyy", # 1D4C5..1D505 ; Common "Zzzz", # 1D506..1D506 ; Unknown "Zyyy", # 1D507..1D50A ; Common "Zzzz", # 1D50B..1D50C ; Unknown "Zyyy", # 1D50D..1D514 ; Common "Zzzz", # 1D515..1D515 ; Unknown "Zyyy", # 1D516..1D51C ; Common "Zzzz", # 1D51D..1D51D ; Unknown "Zyyy", # 1D51E..1D539 ; Common "Zzzz", # 1D53A..1D53A ; Unknown "Zyyy", # 1D53B..1D53E ; Common "Zzzz", # 1D53F..1D53F ; Unknown "Zyyy", # 1D540..1D544 ; Common "Zzzz", # 1D545..1D545 ; Unknown "Zyyy", # 1D546..1D546 ; Common "Zzzz", # 1D547..1D549 ; Unknown "Zyyy", # 1D54A..1D550 ; Common "Zzzz", # 1D551..1D551 ; Unknown "Zyyy", # 1D552..1D6A5 ; Common "Zzzz", # 1D6A6..1D6A7 ; Unknown "Zyyy", # 1D6A8..1D7CB ; Common "Zzzz", # 1D7CC..1D7CD ; Unknown "Zyyy", # 1D7CE..1D7FF ; Common "Sgnw", # 1D800..1DA8B ; SignWriting "Zzzz", # 1DA8C..1DA9A ; Unknown "Sgnw", # 1DA9B..1DA9F ; SignWriting "Zzzz", # 1DAA0..1DAA0 ; Unknown "Sgnw", # 1DAA1..1DAAF ; SignWriting "Zzzz", # 1DAB0..1DEFF ; Unknown "Latn", # 1DF00..1DF1E ; Latin "Zzzz", # 1DF1F..1DF24 ; Unknown "Latn", # 1DF25..1DF2A ; Latin "Zzzz", # 1DF2B..1DFFF ; Unknown "Glag", # 1E000..1E006 ; Glagolitic "Zzzz", # 1E007..1E007 ; Unknown "Glag", # 1E008..1E018 ; Glagolitic "Zzzz", # 1E019..1E01A ; Unknown "Glag", # 1E01B..1E021 ; Glagolitic "Zzzz", # 
1E022..1E022 ; Unknown "Glag", # 1E023..1E024 ; Glagolitic "Zzzz", # 1E025..1E025 ; Unknown "Glag", # 1E026..1E02A ; Glagolitic "Zzzz", # 1E02B..1E02F ; Unknown "Cyrl", # 1E030..1E06D ; Cyrillic "Zzzz", # 1E06E..1E08E ; Unknown "Cyrl", # 1E08F..1E08F ; Cyrillic "Zzzz", # 1E090..1E0FF ; Unknown "Hmnp", # 1E100..1E12C ; Nyiakeng_Puachue_Hmong "Zzzz", # 1E12D..1E12F ; Unknown "Hmnp", # 1E130..1E13D ; Nyiakeng_Puachue_Hmong "Zzzz", # 1E13E..1E13F ; Unknown "Hmnp", # 1E140..1E149 ; Nyiakeng_Puachue_Hmong "Zzzz", # 1E14A..1E14D ; Unknown "Hmnp", # 1E14E..1E14F ; Nyiakeng_Puachue_Hmong "Zzzz", # 1E150..1E28F ; Unknown "Toto", # 1E290..1E2AE ; Toto "Zzzz", # 1E2AF..1E2BF ; Unknown "Wcho", # 1E2C0..1E2F9 ; Wancho "Zzzz", # 1E2FA..1E2FE ; Unknown "Wcho", # 1E2FF..1E2FF ; Wancho "Zzzz", # 1E300..1E4CF ; Unknown "Nagm", # 1E4D0..1E4F9 ; Nag_Mundari "Zzzz", # 1E4FA..1E7DF ; Unknown "Ethi", # 1E7E0..1E7E6 ; Ethiopic "Zzzz", # 1E7E7..1E7E7 ; Unknown "Ethi", # 1E7E8..1E7EB ; Ethiopic "Zzzz", # 1E7EC..1E7EC ; Unknown "Ethi", # 1E7ED..1E7EE ; Ethiopic "Zzzz", # 1E7EF..1E7EF ; Unknown "Ethi", # 1E7F0..1E7FE ; Ethiopic "Zzzz", # 1E7FF..1E7FF ; Unknown "Mend", # 1E800..1E8C4 ; Mende_Kikakui "Zzzz", # 1E8C5..1E8C6 ; Unknown "Mend", # 1E8C7..1E8D6 ; Mende_Kikakui "Zzzz", # 1E8D7..1E8FF ; Unknown "Adlm", # 1E900..1E94B ; Adlam "Zzzz", # 1E94C..1E94F ; Unknown "Adlm", # 1E950..1E959 ; Adlam "Zzzz", # 1E95A..1E95D ; Unknown "Adlm", # 1E95E..1E95F ; Adlam "Zzzz", # 1E960..1EC70 ; Unknown "Zyyy", # 1EC71..1ECB4 ; Common "Zzzz", # 1ECB5..1ED00 ; Unknown "Zyyy", # 1ED01..1ED3D ; Common "Zzzz", # 1ED3E..1EDFF ; Unknown "Arab", # 1EE00..1EE03 ; Arabic "Zzzz", # 1EE04..1EE04 ; Unknown "Arab", # 1EE05..1EE1F ; Arabic "Zzzz", # 1EE20..1EE20 ; Unknown "Arab", # 1EE21..1EE22 ; Arabic "Zzzz", # 1EE23..1EE23 ; Unknown "Arab", # 1EE24..1EE24 ; Arabic "Zzzz", # 1EE25..1EE26 ; Unknown "Arab", # 1EE27..1EE27 ; Arabic "Zzzz", # 1EE28..1EE28 ; Unknown "Arab", # 1EE29..1EE32 ; Arabic "Zzzz", # 1EE33..1EE33 ; Unknown "Arab", # 1EE34..1EE37 ; Arabic "Zzzz", # 1EE38..1EE38 ; Unknown "Arab", # 1EE39..1EE39 ; Arabic "Zzzz", # 1EE3A..1EE3A ; Unknown "Arab", # 1EE3B..1EE3B ; Arabic "Zzzz", # 1EE3C..1EE41 ; Unknown "Arab", # 1EE42..1EE42 ; Arabic "Zzzz", # 1EE43..1EE46 ; Unknown "Arab", # 1EE47..1EE47 ; Arabic "Zzzz", # 1EE48..1EE48 ; Unknown "Arab", # 1EE49..1EE49 ; Arabic "Zzzz", # 1EE4A..1EE4A ; Unknown "Arab", # 1EE4B..1EE4B ; Arabic "Zzzz", # 1EE4C..1EE4C ; Unknown "Arab", # 1EE4D..1EE4F ; Arabic "Zzzz", # 1EE50..1EE50 ; Unknown "Arab", # 1EE51..1EE52 ; Arabic "Zzzz", # 1EE53..1EE53 ; Unknown "Arab", # 1EE54..1EE54 ; Arabic "Zzzz", # 1EE55..1EE56 ; Unknown "Arab", # 1EE57..1EE57 ; Arabic "Zzzz", # 1EE58..1EE58 ; Unknown "Arab", # 1EE59..1EE59 ; Arabic "Zzzz", # 1EE5A..1EE5A ; Unknown "Arab", # 1EE5B..1EE5B ; Arabic "Zzzz", # 1EE5C..1EE5C ; Unknown "Arab", # 1EE5D..1EE5D ; Arabic "Zzzz", # 1EE5E..1EE5E ; Unknown "Arab", # 1EE5F..1EE5F ; Arabic "Zzzz", # 1EE60..1EE60 ; Unknown "Arab", # 1EE61..1EE62 ; Arabic "Zzzz", # 1EE63..1EE63 ; Unknown "Arab", # 1EE64..1EE64 ; Arabic "Zzzz", # 1EE65..1EE66 ; Unknown "Arab", # 1EE67..1EE6A ; Arabic "Zzzz", # 1EE6B..1EE6B ; Unknown "Arab", # 1EE6C..1EE72 ; Arabic "Zzzz", # 1EE73..1EE73 ; Unknown "Arab", # 1EE74..1EE77 ; Arabic "Zzzz", # 1EE78..1EE78 ; Unknown "Arab", # 1EE79..1EE7C ; Arabic "Zzzz", # 1EE7D..1EE7D ; Unknown "Arab", # 1EE7E..1EE7E ; Arabic "Zzzz", # 1EE7F..1EE7F ; Unknown "Arab", # 1EE80..1EE89 ; Arabic "Zzzz", # 1EE8A..1EE8A ; Unknown "Arab", # 1EE8B..1EE9B ; Arabic "Zzzz", # 
1EE9C..1EEA0 ; Unknown "Arab", # 1EEA1..1EEA3 ; Arabic "Zzzz", # 1EEA4..1EEA4 ; Unknown "Arab", # 1EEA5..1EEA9 ; Arabic "Zzzz", # 1EEAA..1EEAA ; Unknown "Arab", # 1EEAB..1EEBB ; Arabic "Zzzz", # 1EEBC..1EEEF ; Unknown "Arab", # 1EEF0..1EEF1 ; Arabic "Zzzz", # 1EEF2..1EFFF ; Unknown "Zyyy", # 1F000..1F02B ; Common "Zzzz", # 1F02C..1F02F ; Unknown "Zyyy", # 1F030..1F093 ; Common "Zzzz", # 1F094..1F09F ; Unknown "Zyyy", # 1F0A0..1F0AE ; Common "Zzzz", # 1F0AF..1F0B0 ; Unknown "Zyyy", # 1F0B1..1F0BF ; Common "Zzzz", # 1F0C0..1F0C0 ; Unknown "Zyyy", # 1F0C1..1F0CF ; Common "Zzzz", # 1F0D0..1F0D0 ; Unknown "Zyyy", # 1F0D1..1F0F5 ; Common "Zzzz", # 1F0F6..1F0FF ; Unknown "Zyyy", # 1F100..1F1AD ; Common "Zzzz", # 1F1AE..1F1E5 ; Unknown "Zyyy", # 1F1E6..1F1FF ; Common "Hira", # 1F200..1F200 ; Hiragana "Zyyy", # 1F201..1F202 ; Common "Zzzz", # 1F203..1F20F ; Unknown "Zyyy", # 1F210..1F23B ; Common "Zzzz", # 1F23C..1F23F ; Unknown "Zyyy", # 1F240..1F248 ; Common "Zzzz", # 1F249..1F24F ; Unknown "Zyyy", # 1F250..1F251 ; Common "Zzzz", # 1F252..1F25F ; Unknown "Zyyy", # 1F260..1F265 ; Common "Zzzz", # 1F266..1F2FF ; Unknown "Zyyy", # 1F300..1F6D7 ; Common "Zzzz", # 1F6D8..1F6DB ; Unknown "Zyyy", # 1F6DC..1F6EC ; Common "Zzzz", # 1F6ED..1F6EF ; Unknown "Zyyy", # 1F6F0..1F6FC ; Common "Zzzz", # 1F6FD..1F6FF ; Unknown "Zyyy", # 1F700..1F776 ; Common "Zzzz", # 1F777..1F77A ; Unknown "Zyyy", # 1F77B..1F7D9 ; Common "Zzzz", # 1F7DA..1F7DF ; Unknown "Zyyy", # 1F7E0..1F7EB ; Common "Zzzz", # 1F7EC..1F7EF ; Unknown "Zyyy", # 1F7F0..1F7F0 ; Common "Zzzz", # 1F7F1..1F7FF ; Unknown "Zyyy", # 1F800..1F80B ; Common "Zzzz", # 1F80C..1F80F ; Unknown "Zyyy", # 1F810..1F847 ; Common "Zzzz", # 1F848..1F84F ; Unknown "Zyyy", # 1F850..1F859 ; Common "Zzzz", # 1F85A..1F85F ; Unknown "Zyyy", # 1F860..1F887 ; Common "Zzzz", # 1F888..1F88F ; Unknown "Zyyy", # 1F890..1F8AD ; Common "Zzzz", # 1F8AE..1F8AF ; Unknown "Zyyy", # 1F8B0..1F8B1 ; Common "Zzzz", # 1F8B2..1F8FF ; Unknown "Zyyy", # 1F900..1FA53 ; Common "Zzzz", # 1FA54..1FA5F ; Unknown "Zyyy", # 1FA60..1FA6D ; Common "Zzzz", # 1FA6E..1FA6F ; Unknown "Zyyy", # 1FA70..1FA7C ; Common "Zzzz", # 1FA7D..1FA7F ; Unknown "Zyyy", # 1FA80..1FA88 ; Common "Zzzz", # 1FA89..1FA8F ; Unknown "Zyyy", # 1FA90..1FABD ; Common "Zzzz", # 1FABE..1FABE ; Unknown "Zyyy", # 1FABF..1FAC5 ; Common "Zzzz", # 1FAC6..1FACD ; Unknown "Zyyy", # 1FACE..1FADB ; Common "Zzzz", # 1FADC..1FADF ; Unknown "Zyyy", # 1FAE0..1FAE8 ; Common "Zzzz", # 1FAE9..1FAEF ; Unknown "Zyyy", # 1FAF0..1FAF8 ; Common "Zzzz", # 1FAF9..1FAFF ; Unknown "Zyyy", # 1FB00..1FB92 ; Common "Zzzz", # 1FB93..1FB93 ; Unknown "Zyyy", # 1FB94..1FBCA ; Common "Zzzz", # 1FBCB..1FBEF ; Unknown "Zyyy", # 1FBF0..1FBF9 ; Common "Zzzz", # 1FBFA..1FFFF ; Unknown "Hani", # 20000..2A6DF ; Han "Zzzz", # 2A6E0..2A6FF ; Unknown "Hani", # 2A700..2B739 ; Han "Zzzz", # 2B73A..2B73F ; Unknown "Hani", # 2B740..2B81D ; Han "Zzzz", # 2B81E..2B81F ; Unknown "Hani", # 2B820..2CEA1 ; Han "Zzzz", # 2CEA2..2CEAF ; Unknown "Hani", # 2CEB0..2EBE0 ; Han "Zzzz", # 2EBE1..2F7FF ; Unknown "Hani", # 2F800..2FA1D ; Han "Zzzz", # 2FA1E..2FFFF ; Unknown "Hani", # 30000..3134A ; Han "Zzzz", # 3134B..3134F ; Unknown "Hani", # 31350..323AF ; Han "Zzzz", # 323B0..E0000 ; Unknown "Zyyy", # E0001..E0001 ; Common "Zzzz", # E0002..E001F ; Unknown "Zyyy", # E0020..E007F ; Common "Zzzz", # E0080..E00FF ; Unknown "Zinh", # E0100..E01EF ; Inherited "Zzzz", # E01F0..10FFFF ; Unknown ] NAMES = { "Adlm": "Adlam", "Aghb": "Caucasian_Albanian", "Ahom": "Ahom", "Arab": "Arabic", "Armi": 
"Imperial_Aramaic", "Armn": "Armenian", "Avst": "Avestan", "Bali": "Balinese", "Bamu": "Bamum", "Bass": "Bassa_Vah", "Batk": "Batak", "Beng": "Bengali", "Bhks": "Bhaiksuki", "Bopo": "Bopomofo", "Brah": "Brahmi", "Brai": "Braille", "Bugi": "Buginese", "Buhd": "Buhid", "Cakm": "Chakma", "Cans": "Canadian_Aboriginal", "Cari": "Carian", "Cham": "Cham", "Cher": "Cherokee", "Chrs": "Chorasmian", "Copt": "Coptic", "Cpmn": "Cypro_Minoan", "Cprt": "Cypriot", "Cyrl": "Cyrillic", "Deva": "Devanagari", "Diak": "Dives_Akuru", "Dogr": "Dogra", "Dsrt": "Deseret", "Dupl": "Duployan", "Egyp": "Egyptian_Hieroglyphs", "Elba": "Elbasan", "Elym": "Elymaic", "Ethi": "Ethiopic", "Geor": "Georgian", "Glag": "Glagolitic", "Gong": "Gunjala_Gondi", "Gonm": "Masaram_Gondi", "Goth": "Gothic", "Gran": "Grantha", "Grek": "Greek", "Gujr": "Gujarati", "Guru": "Gurmukhi", "Hang": "Hangul", "Hani": "Han", "Hano": "Hanunoo", "Hatr": "Hatran", "Hebr": "Hebrew", "Hira": "Hiragana", "Hluw": "Anatolian_Hieroglyphs", "Hmng": "Pahawh_Hmong", "Hmnp": "Nyiakeng_Puachue_Hmong", "Hrkt": "Katakana_Or_Hiragana", "Hung": "Old_Hungarian", "Ital": "Old_Italic", "Java": "Javanese", "Kali": "Kayah_Li", "Kana": "Katakana", "Kawi": "Kawi", "Khar": "Kharoshthi", "Khmr": "Khmer", "Khoj": "Khojki", "Kits": "Khitan_Small_Script", "Knda": "Kannada", "Kthi": "Kaithi", "Lana": "Tai_Tham", "Laoo": "Lao", "Latn": "Latin", "Lepc": "Lepcha", "Limb": "Limbu", "Lina": "Linear_A", "Linb": "Linear_B", "Lisu": "Lisu", "Lyci": "Lycian", "Lydi": "Lydian", "Mahj": "Mahajani", "Maka": "Makasar", "Mand": "Mandaic", "Mani": "Manichaean", "Marc": "Marchen", "Medf": "Medefaidrin", "Mend": "Mende_Kikakui", "Merc": "Meroitic_Cursive", "Mero": "Meroitic_Hieroglyphs", "Mlym": "Malayalam", "Modi": "Modi", "Mong": "Mongolian", "Mroo": "Mro", "Mtei": "Meetei_Mayek", "Mult": "Multani", "Mymr": "Myanmar", "Nagm": "Nag_Mundari", "Nand": "Nandinagari", "Narb": "Old_North_Arabian", "Nbat": "Nabataean", "Newa": "Newa", "Nkoo": "Nko", "Nshu": "Nushu", "Ogam": "Ogham", "Olck": "Ol_Chiki", "Orkh": "Old_Turkic", "Orya": "Oriya", "Osge": "Osage", "Osma": "Osmanya", "Ougr": "Old_Uyghur", "Palm": "Palmyrene", "Pauc": "Pau_Cin_Hau", "Perm": "Old_Permic", "Phag": "Phags_Pa", "Phli": "Inscriptional_Pahlavi", "Phlp": "Psalter_Pahlavi", "Phnx": "Phoenician", "Plrd": "Miao", "Prti": "Inscriptional_Parthian", "Rjng": "Rejang", "Rohg": "Hanifi_Rohingya", "Runr": "Runic", "Samr": "Samaritan", "Sarb": "Old_South_Arabian", "Saur": "Saurashtra", "Sgnw": "SignWriting", "Shaw": "Shavian", "Shrd": "Sharada", "Sidd": "Siddham", "Sind": "Khudawadi", "Sinh": "Sinhala", "Sogd": "Sogdian", "Sogo": "Old_Sogdian", "Sora": "Sora_Sompeng", "Soyo": "Soyombo", "Sund": "Sundanese", "Sylo": "Syloti_Nagri", "Syrc": "Syriac", "Tagb": "Tagbanwa", "Takr": "Takri", "Tale": "Tai_Le", "Talu": "New_Tai_Lue", "Taml": "Tamil", "Tang": "Tangut", "Tavt": "Tai_Viet", "Telu": "Telugu", "Tfng": "Tifinagh", "Tglg": "Tagalog", "Thaa": "Thaana", "Thai": "Thai", "Tibt": "Tibetan", "Tirh": "Tirhuta", "Tnsa": "Tangsa", "Toto": "Toto", "Ugar": "Ugaritic", "Vaii": "Vai", "Vith": "Vithkuqi", "Wara": "Warang_Citi", "Wcho": "Wancho", "Xpeo": "Old_Persian", "Xsux": "Cuneiform", "Yezi": "Yezidi", "Yiii": "Yi", "Zanb": "Zanabazar_Square", "Zinh": "Inherited", "Zyyy": "Common", "Zzzz": "Unknown", } PKaZZZ�Usp"p"!fontTools/unicodedata/__init__.pyfrom __future__ import annotations from fontTools.misc.textTools import byteord, tostr import re from bisect import bisect_right from typing import Literal, TypeVar, overload try: # use unicodedata 
backport compatible with python2: # https://github.com/fonttools/unicodedata2 from unicodedata2 import * except ImportError: # pragma: no cover # fall back to built-in unicodedata (possibly outdated) from unicodedata import * from . import Blocks, Scripts, ScriptExtensions, OTTags __all__ = [ # names from built-in unicodedata module "lookup", "name", "decimal", "digit", "numeric", "category", "bidirectional", "combining", "east_asian_width", "mirrored", "decomposition", "normalize", "unidata_version", "ucd_3_2_0", # additional functions "block", "script", "script_extension", "script_name", "script_code", "script_horizontal_direction", "ot_tags_from_script", "ot_tag_to_script", ] def script(char): """Return the four-letter script code assigned to the Unicode character 'char' as a string. >>> script("a") 'Latn' >>> script(",") 'Zyyy' >>> script(chr(0x10FFFF)) 'Zzzz' """ code = byteord(char) # 'bisect_right(a, x, lo=0, hi=len(a))' returns an insertion point which # comes after (to the right of) any existing entries of x in a, and it # partitions array a into two halves so that, for the left side # all(val <= x for val in a[lo:i]), and for the right side # all(val > x for val in a[i:hi]). # Our 'Scripts.RANGES' is a sorted list of ranges (only their starting # breakpoints); we want to use `bisect_right` to look up the range that # contains the given codepoint: i.e. whose start is less than or equal # to the codepoint. Thus, we subtract 1 from the index returned. i = bisect_right(Scripts.RANGES, code) return Scripts.VALUES[i - 1] def script_extension(char): """Return the script extension property assigned to the Unicode character 'char' as a set of strings. >>> script_extension("a") == {'Latn'} True >>> script_extension(chr(0x060C)) == {'Rohg', 'Syrc', 'Yezi', 'Arab', 'Thaa', 'Nkoo'} True >>> script_extension(chr(0x10FFFF)) == {'Zzzz'} True """ code = byteord(char) i = bisect_right(ScriptExtensions.RANGES, code) value = ScriptExtensions.VALUES[i - 1] if value is None: # code points not explicitly listed for Script Extensions # have as their value the corresponding Script property value return {script(char)} return value def script_name(code, default=KeyError): """Return the long, human-readable script name given a four-letter Unicode script code. If no matching name is found, a KeyError is raised by default. You can use the 'default' argument to return a fallback value (e.g. 'Unknown' or None) instead of throwing an error. """ try: return str(Scripts.NAMES[code].replace("_", " ")) except KeyError: if isinstance(default, type) and issubclass(default, KeyError): raise return default _normalize_re = re.compile(r"[-_ ]+") def _normalize_property_name(string): """Remove case, strip space, '-' and '_' for loose matching.""" return _normalize_re.sub("", string).lower() _SCRIPT_CODES = {_normalize_property_name(v): k for k, v in Scripts.NAMES.items()} def script_code(script_name, default=KeyError): """Returns the four-letter Unicode script code from its long name. If no matching script code is found, a KeyError is raised by default. You can use the 'default' argument to return a fallback string (e.g. 'Zzzz' or None) instead of throwing an error.
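Example (illustrative doctest; name matching is loose with respect to
case, spaces, hyphens and underscores):

>>> script_code("Latin")
'Latn'
>>> script_code("CYRILLIC")
'Cyrl'
>>> script_code("Not a script", default=None) is None
True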
""" normalized_name = _normalize_property_name(script_name) try: return _SCRIPT_CODES[normalized_name] except KeyError: if isinstance(default, type) and issubclass(default, KeyError): raise return default # The data on script direction is taken from Harfbuzz source code: # https://github.com/harfbuzz/harfbuzz/blob/3.2.0/src/hb-common.cc#L514-L613 # This in turn references the following "Script_Metadata" document: # https://docs.google.com/spreadsheets/d/1Y90M0Ie3MUJ6UVCRDOypOtijlMDLNNyyLk36T6iMu0o RTL_SCRIPTS = { # Unicode-1.1 additions "Arab", # Arabic "Hebr", # Hebrew # Unicode-3.0 additions "Syrc", # Syriac "Thaa", # Thaana # Unicode-4.0 additions "Cprt", # Cypriot # Unicode-4.1 additions "Khar", # Kharoshthi # Unicode-5.0 additions "Phnx", # Phoenician "Nkoo", # Nko # Unicode-5.1 additions "Lydi", # Lydian # Unicode-5.2 additions "Avst", # Avestan "Armi", # Imperial Aramaic "Phli", # Inscriptional Pahlavi "Prti", # Inscriptional Parthian "Sarb", # Old South Arabian "Orkh", # Old Turkic "Samr", # Samaritan # Unicode-6.0 additions "Mand", # Mandaic # Unicode-6.1 additions "Merc", # Meroitic Cursive "Mero", # Meroitic Hieroglyphs # Unicode-7.0 additions "Mani", # Manichaean "Mend", # Mende Kikakui "Nbat", # Nabataean "Narb", # Old North Arabian "Palm", # Palmyrene "Phlp", # Psalter Pahlavi # Unicode-8.0 additions "Hatr", # Hatran "Hung", # Old Hungarian # Unicode-9.0 additions "Adlm", # Adlam # Unicode-11.0 additions "Rohg", # Hanifi Rohingya "Sogo", # Old Sogdian "Sogd", # Sogdian # Unicode-12.0 additions "Elym", # Elymaic # Unicode-13.0 additions "Chrs", # Chorasmian "Yezi", # Yezidi # Unicode-14.0 additions "Ougr", # Old Uyghur } HorizDirection = Literal["RTL", "LTR"] T = TypeVar("T") @overload def script_horizontal_direction(script_code: str, default: T) -> HorizDirection | T: ... @overload def script_horizontal_direction( script_code: str, default: type[KeyError] = KeyError ) -> HorizDirection: ... def script_horizontal_direction( script_code: str, default: T | type[KeyError] = KeyError ) -> HorizDirection | T: """Return "RTL" for scripts that contain right-to-left characters according to the Bidi_Class property. Otherwise return "LTR". """ if script_code not in Scripts.NAMES: if isinstance(default, type) and issubclass(default, KeyError): raise default(script_code) return default return "RTL" if script_code in RTL_SCRIPTS else "LTR" def block(char): """Return the block property assigned to the Unicode character 'char' as a string. >>> block("a") 'Basic Latin' >>> block(chr(0x060C)) 'Arabic' >>> block(chr(0xEFFFF)) 'No_Block' """ code = byteord(char) i = bisect_right(Blocks.RANGES, code) return Blocks.VALUES[i - 1] def ot_tags_from_script(script_code): """Return a list of OpenType script tags associated with a given Unicode script code. Return ['DFLT'] script tag for invalid/unknown script codes. """ if script_code in OTTags.SCRIPT_EXCEPTIONS: return [OTTags.SCRIPT_EXCEPTIONS[script_code]] if script_code not in Scripts.NAMES: return [OTTags.DEFAULT_SCRIPT] script_tags = [script_code[0].lower() + script_code[1:]] if script_code in OTTags.NEW_SCRIPT_TAGS: script_tags.extend(OTTags.NEW_SCRIPT_TAGS[script_code]) script_tags.reverse() # last in, first out return script_tags def ot_tag_to_script(tag): """Return the Unicode script code for the given OpenType script tag, or None for "DFLT" tag or if there is no Unicode script associated with it. Raises ValueError if the tag is invalid. 
""" tag = tostr(tag).strip() if not tag or " " in tag or len(tag) > 4: raise ValueError("invalid OpenType tag: %r" % tag) if tag in OTTags.SCRIPT_ALIASES: tag = OTTags.SCRIPT_ALIASES[tag] while len(tag) != 4: tag += str(" ") # pad with spaces if tag == OTTags.DEFAULT_SCRIPT: # it's unclear which Unicode script the "DFLT" OpenType tag maps to, # so here we return None return None if tag in OTTags.NEW_SCRIPT_TAGS_REVERSED: return OTTags.NEW_SCRIPT_TAGS_REVERSED[tag] if tag in OTTags.SCRIPT_EXCEPTIONS_REVERSED: return OTTags.SCRIPT_EXCEPTIONS_REVERSED[tag] # This side of the conversion is fully algorithmic # Any spaces at the end of the tag are replaced by repeating the last # letter. Eg 'nko ' -> 'Nkoo'. # Change first char to uppercase script_code = tag[0].upper() + tag[1] for i in range(2, 4): script_code += script_code[i - 1] if tag[i] == " " else tag[i] if script_code not in Scripts.NAMES: return None return script_code PKaZZZ�Jj�����fontTools/varLib/__init__.py""" Module for dealing with 'gvar'-style font variations, also known as run-time interpolation. The ideas here are very similar to MutatorMath. There is even code to read MutatorMath .designspace files in the varLib.designspace module. For now, if you run this file on a designspace file, it tries to find ttf-interpolatable files for the masters and build a variable-font from them. Such ttf-interpolatable and designspace files can be generated from a Glyphs source, eg., using noto-source as an example: $ fontmake -o ttf-interpolatable -g NotoSansArabic-MM.glyphs Then you can make a variable-font this way: $ fonttools varLib master_ufo/NotoSansArabic.designspace API *will* change in near future. """ from typing import List from fontTools.misc.vector import Vector from fontTools.misc.roundTools import noRound, otRound from fontTools.misc.fixedTools import floatToFixed as fl2fi from fontTools.misc.textTools import Tag, tostr from fontTools.ttLib import TTFont, newTable from fontTools.ttLib.tables._f_v_a_r import Axis, NamedInstance from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates, dropImpliedOnCurvePoints from fontTools.ttLib.tables.ttProgram import Program from fontTools.ttLib.tables.TupleVariation import TupleVariation from fontTools.ttLib.tables import otTables as ot from fontTools.ttLib.tables.otBase import OTTableWriter from fontTools.varLib import builder, models, varStore from fontTools.varLib.merger import VariationMerger, COLRVariationMerger from fontTools.varLib.mvar import MVAR_ENTRIES from fontTools.varLib.iup import iup_delta_optimize from fontTools.varLib.featureVars import addFeatureVariations from fontTools.designspaceLib import DesignSpaceDocument, InstanceDescriptor from fontTools.designspaceLib.split import splitInterpolable, splitVariableFonts from fontTools.varLib.stat import buildVFStatTable from fontTools.colorLib.builder import buildColrV1 from fontTools.colorLib.unbuilder import unbuildColrV1 from functools import partial from collections import OrderedDict, defaultdict, namedtuple import os.path import logging from copy import deepcopy from pprint import pformat from re import fullmatch from .errors import VarLibError, VarLibValidationError log = logging.getLogger("fontTools.varLib") # This is a lib key for the designspace document. The value should be # a comma-separated list of OpenType feature tag(s), to be used as the # FeatureVariations feature. # If present, the DesignSpace <rules processing="..."> flag is ignored. 
FEAVAR_FEATURETAG_LIB_KEY = "com.github.fonttools.varLib.featureVarsFeatureTag" # # Creation routines # def _add_fvar(font, axes, instances: List[InstanceDescriptor]): """ Add 'fvar' table to font. axes is an ordered dictionary of DesignspaceAxis objects. instances is a list of InstanceDescriptor objects with 'location', 'styleName', and possibly 'postScriptFontName' attributes. """ assert axes assert isinstance(axes, OrderedDict) log.info("Generating fvar") fvar = newTable("fvar") nameTable = font["name"] for a in axes.values(): axis = Axis() axis.axisTag = Tag(a.tag) # TODO Skip axes that have no variation. axis.minValue, axis.defaultValue, axis.maxValue = ( a.minimum, a.default, a.maximum, ) axis.axisNameID = nameTable.addMultilingualName( a.labelNames, font, minNameID=256 ) axis.flags = int(a.hidden) fvar.axes.append(axis) for instance in instances: # Filter out discrete axis locations coordinates = { name: value for name, value in instance.location.items() if name in axes } if "en" not in instance.localisedStyleName: if not instance.styleName: raise VarLibValidationError( f"Instance at location '{coordinates}' must have a default English " "style name ('stylename' attribute on the instance element or a " "stylename element with an 'xml:lang=\"en\"' attribute)." ) localisedStyleName = dict(instance.localisedStyleName) localisedStyleName["en"] = tostr(instance.styleName) else: localisedStyleName = instance.localisedStyleName psname = instance.postScriptFontName inst = NamedInstance() inst.subfamilyNameID = nameTable.addMultilingualName(localisedStyleName) if psname is not None: psname = tostr(psname) inst.postscriptNameID = nameTable.addName(psname) inst.coordinates = { axes[k].tag: axes[k].map_backward(v) for k, v in coordinates.items() } # inst.coordinates = {axes[k].tag:v for k,v in coordinates.items()} fvar.instances.append(inst) assert "fvar" not in font font["fvar"] = fvar return fvar def _add_avar(font, axes, mappings, axisTags): """ Add 'avar' table to font. axes is an ordered dictionary of AxisDescriptor objects. """ assert axes assert isinstance(axes, OrderedDict) log.info("Generating avar") avar = newTable("avar") interesting = False vals_triples = {} for axis in axes.values(): # Currently, some rasterizers require that the default value maps # (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment # maps, even when the default normalization mapping for the axis # was not modified. # https://github.com/googlei18n/fontmake/issues/295 # https://github.com/fonttools/fonttools/issues/1011 # TODO(anthrotype) revert this (and 19c4b37) when issue is fixed curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} keys_triple = (axis.minimum, axis.default, axis.maximum) vals_triple = tuple(axis.map_forward(v) for v in keys_triple) vals_triples[axis.tag] = vals_triple if not axis.map: continue items = sorted(axis.map) keys = [item[0] for item in items] vals = [item[1] for item in items] # Current avar requirements. We don't have to enforce # these on the designer and can deduce some ourselves, # but for now just enforce them. if axis.minimum != min(keys): raise VarLibValidationError( f"Axis '{axis.name}': there must be a mapping for the axis minimum " f"value {axis.minimum} and it must be the lowest input mapping value." ) if axis.maximum != max(keys): raise VarLibValidationError( f"Axis '{axis.name}': there must be a mapping for the axis maximum " f"value {axis.maximum} and it must be the highest input mapping value."
) if axis.default not in keys: raise VarLibValidationError( f"Axis '{axis.name}': there must be a mapping for the axis default " f"value {axis.default}." ) # No duplicate input values (output values can be >= their preceding value). if len(set(keys)) != len(keys): raise VarLibValidationError( f"Axis '{axis.name}': All axis mapping input='...' values must be " "unique, but we found duplicates." ) # Ascending values if sorted(vals) != vals: raise VarLibValidationError( f"Axis '{axis.name}': mapping output values must be in ascending order." ) keys = [models.normalizeValue(v, keys_triple) for v in keys] vals = [models.normalizeValue(v, vals_triple) for v in vals] if all(k == v for k, v in zip(keys, vals)): continue interesting = True curve.update(zip(keys, vals)) assert 0.0 in curve and curve[0.0] == 0.0 assert -1.0 not in curve or curve[-1.0] == -1.0 assert +1.0 not in curve or curve[+1.0] == +1.0 # curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}) if mappings: interesting = True inputLocations = [ { axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag]) for name, v in mapping.inputLocation.items() } for mapping in mappings ] outputLocations = [ { axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag]) for name, v in mapping.outputLocation.items() } for mapping in mappings ] assert len(inputLocations) == len(outputLocations) # If base-master is missing, insert it at zero location. if not any(all(v == 0 for k, v in loc.items()) for loc in inputLocations): inputLocations.insert(0, {}) outputLocations.insert(0, {}) model = models.VariationModel(inputLocations, axisTags) storeBuilder = varStore.OnlineVarStoreBuilder(axisTags) storeBuilder.setModel(model) varIdxes = {} for tag in axisTags: masterValues = [] for vo, vi in zip(outputLocations, inputLocations): if tag not in vo: masterValues.append(0) continue v = vo[tag] - vi.get(tag, 0) masterValues.append(fl2fi(v, 14)) varIdxes[tag] = storeBuilder.storeMasters(masterValues)[1] store = storeBuilder.finish() optimized = store.optimize() varIdxes = {axis: optimized[value] for axis, value in varIdxes.items()} varIdxMap = builder.buildDeltaSetIndexMap(varIdxes[t] for t in axisTags) avar.majorVersion = 2 avar.table = ot.avar() avar.table.VarIdxMap = varIdxMap avar.table.VarStore = store assert "avar" not in font if not interesting: log.info("No need for avar") avar = None else: font["avar"] = avar return avar def _add_stat(font): # Note: this function only gets called by old code that calls `build()` # directly.
Newer code that wants to benefit from STAT data from the # designspace should call `build_many()` if "STAT" in font: return from ..otlLib.builder import buildStatTable fvarTable = font["fvar"] axes = [dict(tag=a.axisTag, name=a.axisNameID) for a in fvarTable.axes] buildStatTable(font, axes) _MasterData = namedtuple("_MasterData", ["glyf", "hMetrics", "vMetrics"]) def _add_gvar(font, masterModel, master_ttfs, tolerance=0.5, optimize=True): if tolerance < 0: raise ValueError("`tolerance` must be a positive number.") log.info("Generating gvar") assert "gvar" not in font gvar = font["gvar"] = newTable("gvar") glyf = font["glyf"] defaultMasterIndex = masterModel.reverseMapping[0] master_datas = [ _MasterData( m["glyf"], m["hmtx"].metrics, getattr(m.get("vmtx"), "metrics", None) ) for m in master_ttfs ] for glyph in font.getGlyphOrder(): log.debug("building gvar for glyph '%s'", glyph) isComposite = glyf[glyph].isComposite() allData = [ m.glyf._getCoordinatesAndControls(glyph, m.hMetrics, m.vMetrics) for m in master_datas ] if allData[defaultMasterIndex][1].numberOfContours != 0: # If the default master is not empty, interpret empty non-default masters # as missing glyphs from a sparse master allData = [ d if d is not None and d[1].numberOfContours != 0 else None for d in allData ] model, allData = masterModel.getSubModel(allData) allCoords = [d[0] for d in allData] allControls = [d[1] for d in allData] control = allControls[0] if not models.allEqual(allControls): log.warning("glyph %s has incompatible masters; skipping" % glyph) continue del allControls # Update gvar gvar.variations[glyph] = [] deltas = model.getDeltas( allCoords, round=partial(GlyphCoordinates.__round__, round=round) ) supports = model.supports assert len(deltas) == len(supports) # Prepare for IUP optimization origCoords = deltas[0] endPts = control.endPts for i, (delta, support) in enumerate(zip(deltas[1:], supports[1:])): if all(v == 0 for v in delta.array) and not isComposite: continue var = TupleVariation(support, delta) if optimize: delta_opt = iup_delta_optimize( delta, origCoords, endPts, tolerance=tolerance ) if None in delta_opt: """In composite glyphs, there should be one 0 entry to make sure the gvar entry is written to the font. This is to work around an issue with macOS 10.14 and can be removed once the behaviour of macOS is changed. https://github.com/fonttools/fonttools/issues/1381 """ if all(d is None for d in delta_opt): delta_opt = [(0, 0)] + [None] * (len(delta_opt) - 1) # Use "optimized" version only if smaller... var_opt = TupleVariation(support, delta_opt) axis_tags = sorted( support.keys() ) # Shouldn't matter that this is different from fvar...? tupleData, auxData = var.compile(axis_tags) unoptimized_len = len(tupleData) + len(auxData) tupleData, auxData = var_opt.compile(axis_tags) optimized_len = len(tupleData) + len(auxData) if optimized_len < unoptimized_len: var = var_opt gvar.variations[glyph].append(var) def _remove_TTHinting(font): for tag in ("cvar", "cvt ", "fpgm", "prep"): if tag in font: del font[tag] maxp = font["maxp"] for attr in ( "maxTwilightPoints", "maxStorage", "maxFunctionDefs", "maxInstructionDefs", "maxStackElements", "maxSizeOfInstructions", ): setattr(maxp, attr, 0) maxp.maxZones = 1 font["glyf"].removeHinting() # TODO: Modify gasp table to deactivate gridfitting for all ranges? 
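# A minimal sketch (not library code) of the two `models` helpers that
# _merge_TTHinting below relies on when deciding whether the masters'
# 'cvt ' tables can be merged into a single 'cvar':
#
#   models.nonNone([None, Vector([1, 2])]) == [Vector([1, 2])]  # drop missing masters
#   models.allEqual([2, 2, 2]) == True                          # lengths must agree
#
# Masters without a 'cvt ' table are simply skipped; the remaining tables
# must all have the same number of values to be interpolatable.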
def _merge_TTHinting(font, masterModel, master_ttfs): log.info("Merging TT hinting") assert "cvar" not in font # Check that the existing hinting is compatible # fpgm and prep table for tag in ("fpgm", "prep"): all_pgms = [m[tag].program for m in master_ttfs if tag in m] if not all_pgms: continue font_pgm = getattr(font.get(tag), "program", None) if any(pgm != font_pgm for pgm in all_pgms): log.warning( "Masters have incompatible %s tables, hinting is discarded." % tag ) _remove_TTHinting(font) return # glyf table font_glyf = font["glyf"] master_glyfs = [m["glyf"] for m in master_ttfs] for name, glyph in font_glyf.glyphs.items(): all_pgms = [getattr(glyf.get(name), "program", None) for glyf in master_glyfs] if not any(all_pgms): continue glyph.expand(font_glyf) font_pgm = getattr(glyph, "program", None) if any(pgm != font_pgm for pgm in all_pgms if pgm): log.warning( "Masters have incompatible glyph programs in glyph '%s', hinting is discarded." % name ) # TODO Only drop hinting from this glyph. _remove_TTHinting(font) return # cvt table all_cvs = [Vector(m["cvt "].values) if "cvt " in m else None for m in master_ttfs] nonNone_cvs = models.nonNone(all_cvs) if not nonNone_cvs: # There is no cvt table to make a cvar table from, we're done here. return if not models.allEqual(len(c) for c in nonNone_cvs): log.warning("Masters have incompatible cvt tables, hinting is discarded.") _remove_TTHinting(font) return variations = [] deltas, supports = masterModel.getDeltasAndSupports( all_cvs, round=round ) # builtin round calls into Vector.__round__, which uses builtin round as we like for i, (delta, support) in enumerate(zip(deltas[1:], supports[1:])): if all(v == 0 for v in delta): continue var = TupleVariation(support, delta) variations.append(var) # We can build the cvar table now. 
if variations: cvar = font["cvar"] = newTable("cvar") cvar.version = 1 cvar.variations = variations _MetricsFields = namedtuple( "_MetricsFields", ["tableTag", "metricsTag", "sb1", "sb2", "advMapping", "vOrigMapping"], ) HVAR_FIELDS = _MetricsFields( tableTag="HVAR", metricsTag="hmtx", sb1="LsbMap", sb2="RsbMap", advMapping="AdvWidthMap", vOrigMapping=None, ) VVAR_FIELDS = _MetricsFields( tableTag="VVAR", metricsTag="vmtx", sb1="TsbMap", sb2="BsbMap", advMapping="AdvHeightMap", vOrigMapping="VOrgMap", ) def _add_HVAR(font, masterModel, master_ttfs, axisTags): _add_VHVAR(font, masterModel, master_ttfs, axisTags, HVAR_FIELDS) def _add_VVAR(font, masterModel, master_ttfs, axisTags): _add_VHVAR(font, masterModel, master_ttfs, axisTags, VVAR_FIELDS) def _add_VHVAR(font, masterModel, master_ttfs, axisTags, tableFields): tableTag = tableFields.tableTag assert tableTag not in font log.info("Generating " + tableTag) VHVAR = newTable(tableTag) tableClass = getattr(ot, tableTag) vhvar = VHVAR.table = tableClass() vhvar.Version = 0x00010000 glyphOrder = font.getGlyphOrder() # Build list of source font advance widths for each glyph metricsTag = tableFields.metricsTag advMetricses = [m[metricsTag].metrics for m in master_ttfs] # Build list of source font vertical origin coords for each glyph if tableTag == "VVAR" and "VORG" in master_ttfs[0]: vOrigMetricses = [m["VORG"].VOriginRecords for m in master_ttfs] defaultYOrigs = [m["VORG"].defaultVertOriginY for m in master_ttfs] vOrigMetricses = list(zip(vOrigMetricses, defaultYOrigs)) else: vOrigMetricses = None metricsStore, advanceMapping, vOrigMapping = _get_advance_metrics( font, masterModel, master_ttfs, axisTags, glyphOrder, advMetricses, vOrigMetricses, ) vhvar.VarStore = metricsStore if advanceMapping is None: setattr(vhvar, tableFields.advMapping, None) else: setattr(vhvar, tableFields.advMapping, advanceMapping) if vOrigMapping is not None: setattr(vhvar, tableFields.vOrigMapping, vOrigMapping) setattr(vhvar, tableFields.sb1, None) setattr(vhvar, tableFields.sb2, None) font[tableTag] = VHVAR return def _get_advance_metrics( font, masterModel, master_ttfs, axisTags, glyphOrder, advMetricses, vOrigMetricses=None, ): vhAdvanceDeltasAndSupports = {} vOrigDeltasAndSupports = {} # HACK: we treat width 65535 as a sentinel value to signal that a glyph # from a non-default master should not participate in computing {H,V}VAR, # as if it were missing. This allows other glyph-related data to vary # independently of glyph metrics sparse_advance = 0xFFFF for glyph in glyphOrder: vhAdvances = [ ( metrics[glyph][0] if glyph in metrics and metrics[glyph][0] != sparse_advance else None ) for metrics in advMetricses ] vhAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports( vhAdvances, round=round ) singleModel = models.allEqual(id(v[1]) for v in vhAdvanceDeltasAndSupports.values()) if vOrigMetricses: singleModel = False for glyph in glyphOrder: # We need to supply a vOrigs tuple with non-None default values # for each glyph. vOrigMetricses contains values only for those # glyphs which have a non-default vOrig.
vOrigs = [ metrics[glyph] if glyph in metrics else defaultVOrig for metrics, defaultVOrig in vOrigMetricses ] vOrigDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports( vOrigs, round=round ) directStore = None if singleModel: # Build direct mapping supports = next(iter(vhAdvanceDeltasAndSupports.values()))[1][1:] varTupleList = builder.buildVarRegionList(supports, axisTags) varTupleIndexes = list(range(len(supports))) varData = builder.buildVarData(varTupleIndexes, [], optimize=False) for glyphName in glyphOrder: varData.addItem(vhAdvanceDeltasAndSupports[glyphName][0], round=noRound) varData.optimize() directStore = builder.buildVarStore(varTupleList, [varData]) # Build optimized indirect mapping storeBuilder = varStore.OnlineVarStoreBuilder(axisTags) advMapping = {} for glyphName in glyphOrder: deltas, supports = vhAdvanceDeltasAndSupports[glyphName] storeBuilder.setSupports(supports) advMapping[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound) if vOrigMetricses: vOrigMap = {} for glyphName in glyphOrder: deltas, supports = vOrigDeltasAndSupports[glyphName] storeBuilder.setSupports(supports) vOrigMap[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound) indirectStore = storeBuilder.finish() mapping2 = indirectStore.optimize(use_NO_VARIATION_INDEX=False) advMapping = [mapping2[advMapping[g]] for g in glyphOrder] advanceMapping = builder.buildVarIdxMap(advMapping, glyphOrder) if vOrigMetricses: vOrigMap = [mapping2[vOrigMap[g]] for g in glyphOrder] useDirect = False vOrigMapping = None if directStore: # Compile both, see which is more compact writer = OTTableWriter() directStore.compile(writer, font) directSize = len(writer.getAllData()) writer = OTTableWriter() indirectStore.compile(writer, font) advanceMapping.compile(writer, font) indirectSize = len(writer.getAllData()) useDirect = directSize < indirectSize if useDirect: metricsStore = directStore advanceMapping = None else: metricsStore = indirectStore if vOrigMetricses: vOrigMapping = builder.buildVarIdxMap(vOrigMap, glyphOrder) return metricsStore, advanceMapping, vOrigMapping def _add_MVAR(font, masterModel, master_ttfs, axisTags): log.info("Generating MVAR") store_builder = varStore.OnlineVarStoreBuilder(axisTags) records = [] lastTableTag = None fontTable = None tables = None # HACK: we need to special-case post.underlineThickness and .underlinePosition # and unilaterally/arbitrarily define a sentinel value to distinguish the case # when a post table is present in a given master simply because that's where # the glyph names in TrueType must be stored, but the underline values are not # meant to be used for building MVAR's deltas. The value of -0x8000 (-32768), # the minimum FWord (int16) value, was chosen for its unlikelihood to appear # in real-world underline position/thickness values. specialTags = {"unds": -0x8000, "undo": -0x8000} for tag, (tableTag, itemName) in sorted(MVAR_ENTRIES.items(), key=lambda kv: kv[1]): # For each tag, fetch the associated table from all fonts (or not when we are # still looking at a tag from the same tables) and set up the variation model # for them.
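# Illustrative note: MVAR_ENTRIES maps MVAR value tags to (tableTag, itemName)
# pairs, e.g. roughly "hasc" -> ("OS/2", "sTypoAscender") and
# "undo" -> ("post", "underlinePosition"); sorting by that pair groups
# consecutive tags that live in the same source table.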
if tableTag != lastTableTag: tables = fontTable = None if tableTag in font: fontTable = font[tableTag] tables = [] for master in master_ttfs: if tableTag not in master or ( tag in specialTags and getattr(master[tableTag], itemName) == specialTags[tag] ): tables.append(None) else: tables.append(master[tableTag]) model, tables = masterModel.getSubModel(tables) store_builder.setModel(model) lastTableTag = tableTag if tables is None: # Tag not applicable to the master font. continue # TODO support gasp entries master_values = [getattr(table, itemName) for table in tables] if models.allEqual(master_values): base, varIdx = master_values[0], None else: base, varIdx = store_builder.storeMasters(master_values) setattr(fontTable, itemName, base) if varIdx is None: continue log.info(" %s: %s.%s %s", tag, tableTag, itemName, master_values) rec = ot.MetricsValueRecord() rec.ValueTag = tag rec.VarIdx = varIdx records.append(rec) assert "MVAR" not in font if records: store = store_builder.finish() # Optimize mapping = store.optimize() for rec in records: rec.VarIdx = mapping[rec.VarIdx] MVAR = font["MVAR"] = newTable("MVAR") mvar = MVAR.table = ot.MVAR() mvar.Version = 0x00010000 mvar.Reserved = 0 mvar.VarStore = store # XXX these should not be hard-coded but computed automatically mvar.ValueRecordSize = 8 mvar.ValueRecordCount = len(records) mvar.ValueRecord = sorted(records, key=lambda r: r.ValueTag) def _add_BASE(font, masterModel, master_ttfs, axisTags): log.info("Generating BASE") merger = VariationMerger(masterModel, axisTags, font) merger.mergeTables(font, master_ttfs, ["BASE"]) store = merger.store_builder.finish() if not store: return base = font["BASE"].table assert base.Version == 0x00010000 base.Version = 0x00010001 base.VarStore = store def _merge_OTL(font, model, master_fonts, axisTags): otl_tags = ["GSUB", "GDEF", "GPOS"] if not any(tag in font for tag in otl_tags): return log.info("Merging OpenType Layout tables") merger = VariationMerger(model, axisTags, font) merger.mergeTables(font, master_fonts, otl_tags) store = merger.store_builder.finish() if not store: return try: GDEF = font["GDEF"].table assert GDEF.Version <= 0x00010002 except KeyError: font["GDEF"] = newTable("GDEF") GDEFTable = font["GDEF"] = newTable("GDEF") GDEF = GDEFTable.table = ot.GDEF() GDEF.GlyphClassDef = None GDEF.AttachList = None GDEF.LigCaretList = None GDEF.MarkAttachClassDef = None GDEF.MarkGlyphSetsDef = None GDEF.Version = 0x00010003 GDEF.VarStore = store # Optimize varidx_map = store.optimize() GDEF.remap_device_varidxes(varidx_map) if "GPOS" in font: font["GPOS"].table.remap_device_varidxes(varidx_map) def _add_GSUB_feature_variations( font, axes, internal_axis_supports, rules, featureTags ): def normalize(name, value): return models.normalizeLocation({name: value}, internal_axis_supports)[name] log.info("Generating GSUB FeatureVariations") axis_tags = {name: axis.tag for name, axis in axes.items()} conditional_subs = [] for rule in rules: region = [] for conditions in rule.conditionSets: space = {} for condition in conditions: axis_name = condition["name"] if condition["minimum"] is not None: minimum = normalize(axis_name, condition["minimum"]) else: minimum = -1.0 if condition["maximum"] is not None: maximum = normalize(axis_name, condition["maximum"]) else: maximum = 1.0 tag = axis_tags[axis_name] space[tag] = (minimum, maximum) region.append(space) subs = {k: v for k, v in rule.subs} conditional_subs.append((region, subs)) addFeatureVariations(font, conditional_subs, featureTags) _DesignSpaceData = 
namedtuple( "_DesignSpaceData", [ "axes", "axisMappings", "internal_axis_supports", "base_idx", "normalized_master_locs", "masters", "instances", "rules", "rulesProcessingLast", "lib", ], ) def _add_CFF2(varFont, model, master_fonts): from .cff import merge_region_fonts glyphOrder = varFont.getGlyphOrder() if "CFF2" not in varFont: from .cff import convertCFFtoCFF2 convertCFFtoCFF2(varFont) ordered_fonts_list = model.reorderMasters(master_fonts, model.reverseMapping) # re-ordering the master list simplifies building the CFF2 data item lists. merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder) def _add_COLR(font, model, master_fonts, axisTags, colr_layer_reuse=True): merger = COLRVariationMerger( model, axisTags, font, allowLayerReuse=colr_layer_reuse ) merger.mergeTables(font, master_fonts) store = merger.store_builder.finish() colr = font["COLR"].table if store: mapping = store.optimize() colr.VarStore = store varIdxes = [mapping[v] for v in merger.varIdxes] colr.VarIndexMap = builder.buildDeltaSetIndexMap(varIdxes) def load_designspace(designspace, log_enabled=True): # TODO: remove this and always assume 'designspace' is a DesignSpaceDocument, # never a file path, as that's already handled by caller if hasattr(designspace, "sources"): # Assume a DesignspaceDocument ds = designspace else: # Assume a file path ds = DesignSpaceDocument.fromfile(designspace) masters = ds.sources if not masters: raise VarLibValidationError("Designspace must have at least one source.") instances = ds.instances # TODO: Use fontTools.designspaceLib.tagForAxisName instead. standard_axis_map = OrderedDict( [ ("weight", ("wght", {"en": "Weight"})), ("width", ("wdth", {"en": "Width"})), ("slant", ("slnt", {"en": "Slant"})), ("optical", ("opsz", {"en": "Optical Size"})), ("italic", ("ital", {"en": "Italic"})), ] ) # Setup axes if not ds.axes: raise VarLibValidationError(f"Designspace must have at least one axis.") axes = OrderedDict() for axis_index, axis in enumerate(ds.axes): axis_name = axis.name if not axis_name: if not axis.tag: raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.") axis_name = axis.name = axis.tag if axis_name in standard_axis_map: if axis.tag is None: axis.tag = standard_axis_map[axis_name][0] if not axis.labelNames: axis.labelNames.update(standard_axis_map[axis_name][1]) else: if not axis.tag: raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.") if not axis.labelNames: axis.labelNames["en"] = tostr(axis_name) axes[axis_name] = axis if log_enabled: log.info("Axes:\n%s", pformat([axis.asdict() for axis in axes.values()])) axisMappings = ds.axisMappings if axisMappings and log_enabled: log.info("Mappings:\n%s", pformat(axisMappings)) # Check all master and instance locations are valid and fill in defaults for obj in masters + instances: obj_name = obj.name or obj.styleName or "" loc = obj.getFullDesignLocation(ds) obj.designLocation = loc if loc is None: raise VarLibValidationError( f"Source or instance '{obj_name}' has no location." ) for axis_name in loc.keys(): if axis_name not in axes: raise VarLibValidationError( f"Location axis '{axis_name}' unknown for '{obj_name}'." ) for axis_name, axis in axes.items(): v = axis.map_backward(loc[axis_name]) if not (axis.minimum <= v <= axis.maximum): raise VarLibValidationError( f"Source or instance '{obj_name}' has out-of-range location " f"for axis '{axis_name}': is mapped to {v} but must be in " f"mapped range [{axis.minimum}..{axis.maximum}] (NOTE: all " "values are in user-space)." 
) # Normalize master locations internal_master_locs = [o.getFullDesignLocation(ds) for o in masters] if log_enabled: log.info("Internal master locations:\n%s", pformat(internal_master_locs)) # TODO This mapping should ideally be moved closer to logic in _add_fvar/avar internal_axis_supports = {} for axis in axes.values(): triple = (axis.minimum, axis.default, axis.maximum) internal_axis_supports[axis.name] = [axis.map_forward(v) for v in triple] if log_enabled: log.info("Internal axis supports:\n%s", pformat(internal_axis_supports)) normalized_master_locs = [ models.normalizeLocation(m, internal_axis_supports) for m in internal_master_locs ] if log_enabled: log.info("Normalized master locations:\n%s", pformat(normalized_master_locs)) # Find base master base_idx = None for i, m in enumerate(normalized_master_locs): if all(v == 0 for v in m.values()): if base_idx is not None: raise VarLibValidationError( "More than one base master found in Designspace." ) base_idx = i if base_idx is None: raise VarLibValidationError( "Base master not found; no master at default location?" ) if log_enabled: log.info("Index of base master: %s", base_idx) return _DesignSpaceData( axes, axisMappings, internal_axis_supports, base_idx, normalized_master_locs, masters, instances, ds.rules, ds.rulesProcessingLast, ds.lib, ) # https://docs.microsoft.com/en-us/typography/opentype/spec/os2#uswidthclass WDTH_VALUE_TO_OS2_WIDTH_CLASS = { 50: 1, 62.5: 2, 75: 3, 87.5: 4, 100: 5, 112.5: 6, 125: 7, 150: 8, 200: 9, } def set_default_weight_width_slant(font, location): if "OS/2" in font: if "wght" in location: weight_class = otRound(max(1, min(location["wght"], 1000))) if font["OS/2"].usWeightClass != weight_class: log.info("Setting OS/2.usWeightClass = %s", weight_class) font["OS/2"].usWeightClass = weight_class if "wdth" in location: # map 'wdth' axis (50..200) to OS/2.usWidthClass (1..9), rounding to closest widthValue = min(max(location["wdth"], 50), 200) widthClass = otRound( models.piecewiseLinearMap(widthValue, WDTH_VALUE_TO_OS2_WIDTH_CLASS) ) if font["OS/2"].usWidthClass != widthClass: log.info("Setting OS/2.usWidthClass = %s", widthClass) font["OS/2"].usWidthClass = widthClass if "slnt" in location and "post" in font: italicAngle = max(-90, min(location["slnt"], 90)) if font["post"].italicAngle != italicAngle: log.info("Setting post.italicAngle = %s", italicAngle) font["post"].italicAngle = italicAngle def drop_implied_oncurve_points(*masters: TTFont) -> int: """Drop impliable on-curve points from all the simple glyphs in masters. In TrueType glyf outlines, on-curve points can be implied when they are located exactly at the midpoint of the line connecting two consecutive off-curve points. The input masters' glyf tables are assumed to contain same-named glyphs that are interpolatable. Oncurve points are only dropped if they can be implied for all the masters. The fonts are modified in-place. Args: masters: The TTFont(s) to modify Returns: The total number of points that were dropped if any. 
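Example (an illustrative sketch; the master file names are hypothetical):

    >>> masters = [TTFont("Master-Light.ttf"), TTFont("Master-Bold.ttf")]  # doctest: +SKIP
    >>> dropped = drop_implied_oncurve_points(*masters)  # doctest: +SKIP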
Reference: https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html """ glyph_masters = defaultdict(list) # multiple DS sources may point to the same TTFont object and we want to # avoid processing the same glyph twice as they are modified in-place for font in {id(m): m for m in masters}.values(): glyf = font["glyf"] for glyphName in glyf.keys(): glyph_masters[glyphName].append(glyf[glyphName]) count = 0 for glyphName, glyphs in glyph_masters.items(): try: dropped = dropImpliedOnCurvePoints(*glyphs) except ValueError as e: # we don't fail for incompatible glyphs in _add_gvar so we shouldn't here log.warning("Failed to drop implied oncurves for %r: %s", glyphName, e) else: count += len(dropped) return count def build_many( designspace: DesignSpaceDocument, master_finder=lambda s: s, exclude=[], optimize=True, skip_vf=lambda vf_name: False, colr_layer_reuse=True, drop_implied_oncurves=False, ): """ Build variable fonts from a designspace file: version 5 can define several VFs, while version 4 implicitly defines one VF covering the whole doc. If master_finder is set, it should be a callable that takes a master filename as found in the designspace file and maps it to the master font binary to be opened (e.g. .ttf or .otf). skip_vf can be used to skip building some of the variable fonts defined in the input designspace. It's a predicate that takes the name of the variable font as argument and returns a `bool`. Always returns a Dict[str, TTFont] keyed by VariableFontDescriptor.name """ res = {} # varLib.build (used further below) by default only builds an incomplete 'STAT' # with an empty AxisValueArray--unless the VF inherited 'STAT' from its base master. # Designspace version 5 can also be used to define 'STAT' labels or customize # axes ordering, etc. To avoid overwriting a pre-existing 'STAT' or redoing the # same work twice, here we check if designspace contains any 'STAT' info before # proceeding to call buildVFStatTable for each VF. # https://github.com/fonttools/fonttools/pull/3024 # https://github.com/fonttools/fonttools/issues/3045 doBuildStatFromDSv5 = ( "STAT" not in exclude and designspace.formatTuple >= (5, 0) and ( any(a.axisLabels or a.axisOrdering is not None for a in designspace.axes) or designspace.locationLabels ) ) for _location, subDoc in splitInterpolable(designspace): for name, vfDoc in splitVariableFonts(subDoc): if skip_vf(name): log.debug(f"Skipping variable TTF font: {name}") continue vf = build( vfDoc, master_finder, exclude=exclude, optimize=optimize, colr_layer_reuse=colr_layer_reuse, drop_implied_oncurves=drop_implied_oncurves, )[0] if doBuildStatFromDSv5: buildVFStatTable(vf, designspace, name) res[name] = vf return res def build( designspace, master_finder=lambda s: s, exclude=[], optimize=True, colr_layer_reuse=True, drop_implied_oncurves=False, ): """ Build a variation font from a designspace file. If master_finder is set, it should be a callable that takes a master filename as found in the designspace file and maps it to the master font binary to be opened (e.g. .ttf or .otf).
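Example (an illustrative sketch; the designspace path is hypothetical):

    >>> vf, model, master_ttfs = build("MyFamily.designspace")  # doctest: +SKIP
    >>> vf.save("MyFamily-VF.ttf")  # doctest: +SKIP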
""" if hasattr(designspace, "sources"): # Assume a DesignspaceDocument pass else: # Assume a file path designspace = DesignSpaceDocument.fromfile(designspace) ds = load_designspace(designspace) log.info("Building variable font") log.info("Loading master fonts") master_fonts = load_masters(designspace, master_finder) # TODO: 'master_ttfs' is unused except for return value, remove later master_ttfs = [] for master in master_fonts: try: master_ttfs.append(master.reader.file.name) except AttributeError: master_ttfs.append(None) # in-memory fonts have no path if drop_implied_oncurves and "glyf" in master_fonts[ds.base_idx]: drop_count = drop_implied_oncurve_points(*master_fonts) log.info( "Dropped %s on-curve points from simple glyphs in the 'glyf' table", drop_count, ) # Copy the base master to work from it vf = deepcopy(master_fonts[ds.base_idx]) if "DSIG" in vf: del vf["DSIG"] # TODO append masters as named-instances as well; needs .designspace change. fvar = _add_fvar(vf, ds.axes, ds.instances) if "STAT" not in exclude: _add_stat(vf) # Map from axis names to axis tags... normalized_master_locs = [ {ds.axes[k].tag: v for k, v in loc.items()} for loc in ds.normalized_master_locs ] # From here on, we use fvar axes only axisTags = [axis.axisTag for axis in fvar.axes] # Assume single-model for now. model = models.VariationModel(normalized_master_locs, axisOrder=axisTags) assert 0 == model.mapping[ds.base_idx] log.info("Building variations tables") if "avar" not in exclude: _add_avar(vf, ds.axes, ds.axisMappings, axisTags) if "BASE" not in exclude and "BASE" in vf: _add_BASE(vf, model, master_fonts, axisTags) if "MVAR" not in exclude: _add_MVAR(vf, model, master_fonts, axisTags) if "HVAR" not in exclude: _add_HVAR(vf, model, master_fonts, axisTags) if "VVAR" not in exclude and "vmtx" in vf: _add_VVAR(vf, model, master_fonts, axisTags) if "GDEF" not in exclude or "GPOS" not in exclude: _merge_OTL(vf, model, master_fonts, axisTags) if "gvar" not in exclude and "glyf" in vf: _add_gvar(vf, model, master_fonts, optimize=optimize) if "cvar" not in exclude and "glyf" in vf: _merge_TTHinting(vf, model, master_fonts) if "GSUB" not in exclude and ds.rules: featureTags = _feature_variations_tags(ds) _add_GSUB_feature_variations( vf, ds.axes, ds.internal_axis_supports, ds.rules, featureTags ) if "CFF2" not in exclude and ("CFF " in vf or "CFF2" in vf): _add_CFF2(vf, model, master_fonts) if "post" in vf: # set 'post' to format 2 to keep the glyph names dropped from CFF2 post = vf["post"] if post.formatType != 2.0: post.formatType = 2.0 post.extraNames = [] post.mapping = {} if "COLR" not in exclude and "COLR" in vf and vf["COLR"].version > 0: _add_COLR(vf, model, master_fonts, axisTags, colr_layer_reuse) set_default_weight_width_slant( vf, location={axis.axisTag: axis.defaultValue for axis in vf["fvar"].axes} ) for tag in exclude: if tag in vf: del vf[tag] # TODO: Only return vf for 4.0+, the rest is unused. return vf, model, master_ttfs def _open_font(path, master_finder=lambda s: s): # load TTFont masters from given 'path': this can be either a .TTX or an # OpenType binary font; or if neither of these, try use the 'master_finder' # callable to resolve the path to a valid .TTX or OpenType font binary. from fontTools.ttx import guessFileType master_path = os.path.normpath(path) tp = guessFileType(master_path) if tp is None: # not an OpenType binary/ttx, fall back to the master finder. 
master_path = master_finder(master_path) tp = guessFileType(master_path) if tp in ("TTX", "OTX"): font = TTFont() font.importXML(master_path) elif tp in ("TTF", "OTF", "WOFF", "WOFF2"): font = TTFont(master_path) else: raise VarLibValidationError("Invalid master path: %r" % master_path) return font def load_masters(designspace, master_finder=lambda s: s): """Ensure that all SourceDescriptor.font attributes have an appropriate TTFont object loaded, or else open TTFont objects from the SourceDescriptor.path attributes. The paths can point to either an OpenType font, a TTX file, or a UFO. In the latter case, use the provided master_finder callable to map from UFO paths to the respective master font binaries (e.g. .ttf, .otf or .ttx). Return list of master TTFont objects in the same order they are listed in the DesignSpaceDocument. """ for master in designspace.sources: # If a SourceDescriptor has a layer name, demand that the compiled TTFont # be supplied by the caller. This spares us from modifying MasterFinder. if master.layerName and master.font is None: raise VarLibValidationError( f"Designspace source '{master.name or '<Unknown>'}' specified a " "layer name but lacks the required TTFont object in the 'font' " "attribute." ) return designspace.loadSourceFonts(_open_font, master_finder=master_finder) class MasterFinder(object): def __init__(self, template): self.template = template def __call__(self, src_path): fullname = os.path.abspath(src_path) dirname, basename = os.path.split(fullname) stem, ext = os.path.splitext(basename) path = self.template.format( fullname=fullname, dirname=dirname, basename=basename, stem=stem, ext=ext, ) return os.path.normpath(path) def _feature_variations_tags(ds): raw_tags = ds.lib.get( FEAVAR_FEATURETAG_LIB_KEY, "rclt" if ds.rulesProcessingLast else "rvrn", ) return sorted({t.strip() for t in raw_tags.split(",")}) def addGSUBFeatureVariations(vf, designspace, featureTags=(), *, log_enabled=False): """Add GSUB FeatureVariations table to variable font, based on DesignSpace rules. Args: vf: A TTFont object representing the variable font. designspace: A DesignSpaceDocument object. featureTags: Optional feature tag(s) to use for the FeatureVariations records. If unset, the key 'com.github.fonttools.varLib.featureVarsFeatureTag' is looked up in the DS <lib> and used; otherwise the default is 'rclt' if the <rules processing="last"> attribute is set, else 'rvrn'. See <https://fonttools.readthedocs.io/en/latest/designspaceLib/xml.html#rules-element> log_enabled: If True, log info about DS axes and sources. Default is False, as the same info may have already been logged as part of varLib.build. 
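Example (an illustrative sketch; the file names are hypothetical):

    >>> vf = TTFont("MyFamily-VF.ttf")  # doctest: +SKIP
    >>> doc = DesignSpaceDocument.fromfile("MyFamily.designspace")  # doctest: +SKIP
    >>> addGSUBFeatureVariations(vf, doc)  # doctest: +SKIP
    >>> vf.save("MyFamily-VF.ttf")  # doctest: +SKIP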
""" ds = load_designspace(designspace, log_enabled=log_enabled) if not ds.rules: return if not featureTags: featureTags = _feature_variations_tags(ds) _add_GSUB_feature_variations( vf, ds.axes, ds.internal_axis_supports, ds.rules, featureTags ) def main(args=None): """Build variable fonts from a designspace file and masters""" from argparse import ArgumentParser from fontTools import configLogger parser = ArgumentParser(prog="varLib", description=main.__doc__) parser.add_argument("designspace") output_group = parser.add_mutually_exclusive_group() output_group.add_argument( "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file" ) output_group.add_argument( "-d", "--output-dir", metavar="OUTPUTDIR", default=None, help="output dir (default: same as input designspace file)", ) parser.add_argument( "-x", metavar="TAG", dest="exclude", action="append", default=[], help="exclude table", ) parser.add_argument( "--disable-iup", dest="optimize", action="store_false", help="do not perform IUP optimization", ) parser.add_argument( "--no-colr-layer-reuse", dest="colr_layer_reuse", action="store_false", help="do not rebuild variable COLR table to optimize COLR layer reuse", ) parser.add_argument( "--drop-implied-oncurves", action="store_true", help=( "drop on-curve points that can be implied when exactly in the middle of " "two off-curve points (only applies to TrueType fonts)" ), ) parser.add_argument( "--master-finder", default="master_ttf_interpolatable/{stem}.ttf", help=( "templated string used for finding binary font " "files given the source file names defined in the " "designspace document. The following special strings " "are defined: {fullname} is the absolute source file " "name; {basename} is the file name without its " "directory; {stem} is the basename without the file " "extension; {ext} is the source file extension; " "{dirname} is the directory of the absolute file " 'name. The default value is "%(default)s".' ), ) parser.add_argument( "--variable-fonts", default=".*", metavar="VF_NAME", help=( "Filter the list of variable fonts produced from the input " "Designspace v5 file. By default all listed variable fonts are " "generated. To generate a specific variable font (or variable fonts) " 'that match a given "name" attribute, you can pass as argument ' "the full name or a regular expression. E.g.: --variable-fonts " '"MyFontVF_WeightOnly"; or --variable-fonts "MyFontVFItalic_.*".' ), ) logging_group = parser.add_mutually_exclusive_group(required=False) logging_group.add_argument( "-v", "--verbose", action="store_true", help="Run more verbosely." ) logging_group.add_argument( "-q", "--quiet", action="store_true", help="Turn verbosity off." ) options = parser.parse_args(args) configLogger( level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") ) designspace_filename = options.designspace designspace = DesignSpaceDocument.fromfile(designspace_filename) vf_descriptors = designspace.getVariableFonts() if not vf_descriptors: parser.error(f"No variable fonts in given designspace {designspace.path!r}") vfs_to_build = [] for vf in vf_descriptors: # Skip variable fonts that do not match the user's inclusion regex if given. 
if not fullmatch(options.variable_fonts, vf.name): continue vfs_to_build.append(vf) if not vfs_to_build: parser.error(f"No variable fonts matching {options.variable_fonts!r}") if options.outfile is not None and len(vfs_to_build) > 1: parser.error( "can't specify -o because there are multiple VFs to build; " "use --output-dir, or select a single VF with --variable-fonts" ) output_dir = options.output_dir if output_dir is None: output_dir = os.path.dirname(designspace_filename) vf_name_to_output_path = {} if len(vfs_to_build) == 1 and options.outfile is not None: vf_name_to_output_path[vfs_to_build[0].name] = options.outfile else: for vf in vfs_to_build: filename = vf.filename if vf.filename is not None else vf.name + ".{ext}" vf_name_to_output_path[vf.name] = os.path.join(output_dir, filename) finder = MasterFinder(options.master_finder) vfs = build_many( designspace, finder, exclude=options.exclude, optimize=options.optimize, colr_layer_reuse=options.colr_layer_reuse, drop_implied_oncurves=options.drop_implied_oncurves, ) for vf_name, vf in vfs.items(): ext = "otf" if vf.sfntVersion == "OTTO" else "ttf" output_path = vf_name_to_output_path[vf_name].format(ext=ext) output_dir = os.path.dirname(output_path) if output_dir: os.makedirs(output_dir, exist_ok=True) log.info("Saving variation font %s", output_path) vf.save(output_path) if __name__ == "__main__": import sys if len(sys.argv) > 1: sys.exit(main()) import doctest sys.exit(doctest.testmod().failed) PKaZZZ�P�__fontTools/varLib/__main__.pyimport sys from fontTools.varLib import main if __name__ == "__main__": sys.exit(main()) PKaZZZ �t�ggfontTools/varLib/avar.pyfrom fontTools.varLib import _add_avar, load_designspace from fontTools.misc.cliTools import makeOutputFileName import logging log = logging.getLogger("fontTools.varLib.avar") def main(args=None): """Add `avar` table from designspace file to variable font.""" if args is None: import sys args = sys.argv[1:] from fontTools import configLogger from fontTools.ttLib import TTFont from fontTools.designspaceLib import DesignSpaceDocument import argparse parser = argparse.ArgumentParser( "fonttools varLib.avar", description="Add `avar` table from designspace file to variable font.", ) parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.") parser.add_argument( "designspace", metavar="family.designspace", help="Designspace file." ) parser.add_argument( "-o", "--output-file", type=str, help="Output font file name.", ) parser.add_argument( "-v", "--verbose", action="store_true", help="Run more verbosely." 
) options = parser.parse_args(args) configLogger(level=("INFO" if options.verbose else "WARNING")) font = TTFont(options.font) if not "fvar" in font: log.error("Not a variable font.") return 1 axisTags = [a.axisTag for a in font["fvar"].axes] ds = load_designspace(options.designspace) if "avar" in font: log.warning("avar table already present, overwriting.") del font["avar"] _add_avar(font, ds.axes, ds.axisMappings, axisTags) if options.output_file is None: outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar") else: outfile = options.output_file if outfile: log.info("Saving %s", outfile) font.save(outfile) if __name__ == "__main__": import sys sys.exit(main()) PKaZZZ�M� �j�jfontTools/varLib/avarPlanner.pyfrom fontTools.ttLib import newTable from fontTools.ttLib.tables._f_v_a_r import Axis as fvarAxis from fontTools.pens.areaPen import AreaPen from fontTools.pens.basePen import NullPen from fontTools.pens.statisticsPen import StatisticsPen from fontTools.varLib.models import piecewiseLinearMap, normalizeValue from fontTools.misc.cliTools import makeOutputFileName import math import logging from pprint import pformat __all__ = [ "planWeightAxis", "planWidthAxis", "planSlantAxis", "planOpticalSizeAxis", "planAxis", "sanitizeWeight", "sanitizeWidth", "sanitizeSlant", "measureWeight", "measureWidth", "measureSlant", "normalizeLinear", "normalizeLog", "normalizeDegrees", "interpolateLinear", "interpolateLog", "processAxis", "makeDesignspaceSnippet", "addEmptyAvar", "main", ] log = logging.getLogger("fontTools.varLib.avarPlanner") WEIGHTS = [ 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, ] WIDTHS = [ 25.0, 37.5, 50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 137.5, 150.0, 162.5, 175.0, 187.5, 200.0, ] SLANTS = list(math.degrees(math.atan(d / 20.0)) for d in range(-20, 21)) SIZES = [ 5, 6, 7, 8, 9, 10, 11, 12, 14, 18, 24, 30, 36, 48, 60, 72, 96, 120, 144, 192, 240, 288, ] SAMPLES = 8 def normalizeLinear(value, rangeMin, rangeMax): """Linearly normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation.""" return (value - rangeMin) / (rangeMax - rangeMin) def interpolateLinear(t, a, b): """Linear interpolation between a and b, with t typically in [0, 1].""" return a + t * (b - a) def normalizeLog(value, rangeMin, rangeMax): """Logarithmically normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation.""" logMin = math.log(rangeMin) logMax = math.log(rangeMax) return (math.log(value) - logMin) / (logMax - logMin) def interpolateLog(t, a, b): """Logarithmic interpolation between a and b, with t typically in [0, 1].""" logA = math.log(a) logB = math.log(b) return math.exp(logA + t * (logB - logA)) def normalizeDegrees(value, rangeMin, rangeMax): """Angularly normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation.""" tanMin = math.tan(math.radians(rangeMin)) tanMax = math.tan(math.radians(rangeMax)) return (math.tan(math.radians(value)) - tanMin) / (tanMax - tanMin) def measureWeight(glyphset, glyphs=None): """Measure the perceptual average weight of the given glyphs.""" if isinstance(glyphs, dict): frequencies = glyphs else: frequencies = {g: 1 for g in glyphs} wght_sum = wdth_sum = 0 for glyph_name in glyphs: if frequencies is not None: frequency = frequencies.get(glyph_name, 0) if frequency == 0: continue else: frequency = 1 glyph = glyphset[glyph_name] pen = AreaPen(glyphset=glyphset) glyph.draw(pen) mult = glyph.width * frequency wght_sum += mult * abs(pen.value) wdth_sum += mult return wght_sum 
/ wdth_sum def measureWidth(glyphset, glyphs=None): """Measure the average width of the given glyphs.""" if isinstance(glyphs, dict): frequencies = glyphs else: frequencies = {g: 1 for g in glyphs} wdth_sum = 0 freq_sum = 0 for glyph_name in glyphs: if frequencies is not None: frequency = frequencies.get(glyph_name, 0) if frequency == 0: continue else: frequency = 1 glyph = glyphset[glyph_name] pen = NullPen() glyph.draw(pen) wdth_sum += glyph.width * frequency freq_sum += frequency return wdth_sum / freq_sum def measureSlant(glyphset, glyphs=None): """Measure the perceptual average slant angle of the given glyphs.""" if isinstance(glyphs, dict): frequencies = glyphs else: frequencies = {g: 1 for g in glyphs} slnt_sum = 0 freq_sum = 0 for glyph_name in glyphs: if frequencies is not None: frequency = frequencies.get(glyph_name, 0) if frequency == 0: continue else: frequency = 1 glyph = glyphset[glyph_name] pen = StatisticsPen(glyphset=glyphset) glyph.draw(pen) mult = glyph.width * frequency slnt_sum += mult * pen.slant freq_sum += mult return -math.degrees(math.atan(slnt_sum / freq_sum)) def sanitizeWidth(userTriple, designTriple, pins, measurements): """Sanitize the width axis limits.""" minVal, defaultVal, maxVal = ( measurements[designTriple[0]], measurements[designTriple[1]], measurements[designTriple[2]], ) calculatedMinVal = userTriple[1] * (minVal / defaultVal) calculatedMaxVal = userTriple[1] * (maxVal / defaultVal) log.info("Original width axis limits: %g:%g:%g", *userTriple) log.info( "Calculated width axis limits: %g:%g:%g", calculatedMinVal, userTriple[1], calculatedMaxVal, ) if ( abs(calculatedMinVal - userTriple[0]) / userTriple[1] > 0.05 or abs(calculatedMaxVal - userTriple[2]) / userTriple[1] > 0.05 ): log.warning("Calculated width axis min/max do not match user input.") log.warning( " Current width axis limits: %g:%g:%g", *userTriple, ) log.warning( " Suggested width axis limits: %g:%g:%g", calculatedMinVal, userTriple[1], calculatedMaxVal, ) return False return True def sanitizeWeight(userTriple, designTriple, pins, measurements): """Sanitize the weight axis limits.""" if len(set(userTriple)) < 3: return True minVal, defaultVal, maxVal = ( measurements[designTriple[0]], measurements[designTriple[1]], measurements[designTriple[2]], ) logMin = math.log(minVal) logDefault = math.log(defaultVal) logMax = math.log(maxVal) t = (userTriple[1] - userTriple[0]) / (userTriple[2] - userTriple[0]) y = math.exp(logMin + t * (logMax - logMin)) t = (y - minVal) / (maxVal - minVal) calculatedDefaultVal = userTriple[0] + t * (userTriple[2] - userTriple[0]) log.info("Original weight axis limits: %g:%g:%g", *userTriple) log.info( "Calculated weight axis limits: %g:%g:%g", userTriple[0], calculatedDefaultVal, userTriple[2], ) if abs(calculatedDefaultVal - userTriple[1]) / userTriple[1] > 0.05: log.warning("Calculated weight axis default does not match user input.") log.warning( " Current weight axis limits: %g:%g:%g", *userTriple, ) log.warning( " Suggested weight axis limits, changing default: %g:%g:%g", userTriple[0], calculatedDefaultVal, userTriple[2], ) t = (userTriple[2] - userTriple[0]) / (userTriple[1] - userTriple[0]) y = math.exp(logMin + t * (logDefault - logMin)) t = (y - minVal) / (defaultVal - minVal) calculatedMaxVal = userTriple[0] + t * (userTriple[1] - userTriple[0]) log.warning( " Suggested weight axis limits, changing maximum: %g:%g:%g", userTriple[0], userTriple[1], calculatedMaxVal, ) t = (userTriple[0] - userTriple[2]) / (userTriple[1] - userTriple[2]) y = 
math.exp(logMax + t * (logDefault - logMax)) t = (y - maxVal) / (defaultVal - maxVal) calculatedMinVal = userTriple[2] + t * (userTriple[1] - userTriple[2]) log.warning( " Suggested weight axis limits, changing minimum: %g:%g:%g", calculatedMinVal, userTriple[1], userTriple[2], ) return False return True def sanitizeSlant(userTriple, designTriple, pins, measurements): """Sanitize the slant axis limits.""" log.info("Original slant axis limits: %g:%g:%g", *userTriple) log.info( "Calculated slant axis limits: %g:%g:%g", measurements[designTriple[0]], measurements[designTriple[1]], measurements[designTriple[2]], ) if ( abs(measurements[designTriple[0]] - userTriple[0]) > 1 or abs(measurements[designTriple[1]] - userTriple[1]) > 1 or abs(measurements[designTriple[2]] - userTriple[2]) > 1 ): log.warning("Calculated slant axis min/default/max do not match user input.") log.warning( " Current slant axis limits: %g:%g:%g", *userTriple, ) log.warning( " Suggested slant axis limits: %g:%g:%g", measurements[designTriple[0]], measurements[designTriple[1]], measurements[designTriple[2]], ) return False return True def planAxis( measureFunc, normalizeFunc, interpolateFunc, glyphSetFunc, axisTag, axisLimits, values, samples=None, glyphs=None, designLimits=None, pins=None, sanitizeFunc=None, ): """Plan an axis. measureFunc: callable that takes a glyphset and an optional list of glyph names, and returns the glyphset-wide measurement to be used for the axis. normalizeFunc: callable that takes a measurement and a minimum and maximum, and normalizes the measurement into the range 0..1, possibly extrapolating too. interpolateFunc: callable that takes a normalized t value, and a minimum and maximum, and returns the interpolated value, possibly extrapolating too. glyphSetFunc: callable that takes a variations "location" dictionary, and returns a glyphset. axisTag: the axis tag string. axisLimits: a triple of minimum, default, and maximum values for the axis, or an `fvar` Axis object. values: a list of output values to map for this axis. samples: the number of samples to use when sampling. Default 8. glyphs: a list of glyph names to use when sampling. Defaults to None, which will process all glyphs. designLimits: an optional triple of minimum, default, and maximum values representing the "design" limits for the axis. If not provided, the axisLimits will be used. pins: an optional dictionary of before/after mapping entries to pin in the output. sanitizeFunc: an optional callable to call to sanitize the axis limits.
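Returns a tuple of two dicts: the planned mapping in user coordinates (input value to planned output value), and the same mapping in normalized coordinates, as consumed by the `avar` table. Both are empty if the planned mapping turns out to be (near-)identity, in which case no mapping is needed.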
""" if isinstance(axisLimits, fvarAxis): axisLimits = (axisLimits.minValue, axisLimits.defaultValue, axisLimits.maxValue) minValue, defaultValue, maxValue = axisLimits if samples is None: samples = SAMPLES if glyphs is None: glyphs = glyphSetFunc({}).keys() if pins is None: pins = {} else: pins = pins.copy() log.info( "Axis limits min %g / default %g / max %g", minValue, defaultValue, maxValue ) triple = (minValue, defaultValue, maxValue) if designLimits is not None: log.info("Axis design-limits min %g / default %g / max %g", *designLimits) else: designLimits = triple if pins: log.info("Pins %s", sorted(pins.items())) pins.update( { minValue: designLimits[0], defaultValue: designLimits[1], maxValue: designLimits[2], } ) out = {} outNormalized = {} axisMeasurements = {} for value in sorted({minValue, defaultValue, maxValue} | set(pins.keys())): glyphset = glyphSetFunc(location={axisTag: value}) designValue = pins[value] axisMeasurements[designValue] = measureFunc(glyphset, glyphs) if sanitizeFunc is not None: log.info("Sanitizing axis limit values for the `%s` axis.", axisTag) sanitizeFunc(triple, designLimits, pins, axisMeasurements) log.debug("Calculated average value:\n%s", pformat(axisMeasurements)) for (rangeMin, targetMin), (rangeMax, targetMax) in zip( list(sorted(pins.items()))[:-1], list(sorted(pins.items()))[1:], ): targetValues = {w for w in values if rangeMin < w < rangeMax} if not targetValues: continue normalizedMin = normalizeValue(rangeMin, triple) normalizedMax = normalizeValue(rangeMax, triple) normalizedTargetMin = normalizeValue(targetMin, designLimits) normalizedTargetMax = normalizeValue(targetMax, designLimits) log.info("Planning target values %s.", sorted(targetValues)) log.info("Sampling %u points in range %g,%g.", samples, rangeMin, rangeMax) valueMeasurements = axisMeasurements.copy() for sample in range(1, samples + 1): value = rangeMin + (rangeMax - rangeMin) * sample / (samples + 1) log.debug("Sampling value %g.", value) glyphset = glyphSetFunc(location={axisTag: value}) designValue = piecewiseLinearMap(value, pins) valueMeasurements[designValue] = measureFunc(glyphset, glyphs) log.debug("Sampled average value:\n%s", pformat(valueMeasurements)) measurementValue = {} for value in sorted(valueMeasurements): measurementValue[valueMeasurements[value]] = value out[rangeMin] = targetMin outNormalized[normalizedMin] = normalizedTargetMin for value in sorted(targetValues): t = normalizeFunc(value, rangeMin, rangeMax) targetMeasurement = interpolateFunc( t, valueMeasurements[targetMin], valueMeasurements[targetMax] ) targetValue = piecewiseLinearMap(targetMeasurement, measurementValue) log.debug("Planned mapping value %g to %g." % (value, targetValue)) out[value] = targetValue valueNormalized = normalizedMin + (value - rangeMin) / ( rangeMax - rangeMin ) * (normalizedMax - normalizedMin) outNormalized[valueNormalized] = normalizedTargetMin + ( targetValue - targetMin ) / (targetMax - targetMin) * (normalizedTargetMax - normalizedTargetMin) out[rangeMax] = targetMax outNormalized[normalizedMax] = normalizedTargetMax log.info("Planned mapping for the `%s` axis:\n%s", axisTag, pformat(out)) log.info( "Planned normalized mapping for the `%s` axis:\n%s", axisTag, pformat(outNormalized), ) if all(abs(k - v) < 0.01 for k, v in outNormalized.items()): log.info("Detected identity mapping for the `%s` axis. 
Dropping.", axisTag) out = {} outNormalized = {} return out, outNormalized def planWeightAxis( glyphSetFunc, axisLimits, weights=None, samples=None, glyphs=None, designLimits=None, pins=None, sanitize=False, ): """Plan a weight (`wght`) axis. weights: A list of weight values to plan for. If None, the default values are used. This function simply calls planAxis with values=weights, and the appropriate arguments. See documenation for planAxis for more information. """ if weights is None: weights = WEIGHTS return planAxis( measureWeight, normalizeLinear, interpolateLog, glyphSetFunc, "wght", axisLimits, values=weights, samples=samples, glyphs=glyphs, designLimits=designLimits, pins=pins, sanitizeFunc=sanitizeWeight if sanitize else None, ) def planWidthAxis( glyphSetFunc, axisLimits, widths=None, samples=None, glyphs=None, designLimits=None, pins=None, sanitize=False, ): """Plan a width (`wdth`) axis. widths: A list of width values (percentages) to plan for. If None, the default values are used. This function simply calls planAxis with values=widths, and the appropriate arguments. See documenation for planAxis for more information. """ if widths is None: widths = WIDTHS return planAxis( measureWidth, normalizeLinear, interpolateLinear, glyphSetFunc, "wdth", axisLimits, values=widths, samples=samples, glyphs=glyphs, designLimits=designLimits, pins=pins, sanitizeFunc=sanitizeWidth if sanitize else None, ) def planSlantAxis( glyphSetFunc, axisLimits, slants=None, samples=None, glyphs=None, designLimits=None, pins=None, sanitize=False, ): """Plan a slant (`slnt`) axis. slants: A list slant angles to plan for. If None, the default values are used. This function simply calls planAxis with values=slants, and the appropriate arguments. See documenation for planAxis for more information. """ if slants is None: slants = SLANTS return planAxis( measureSlant, normalizeDegrees, interpolateLinear, glyphSetFunc, "slnt", axisLimits, values=slants, samples=samples, glyphs=glyphs, designLimits=designLimits, pins=pins, sanitizeFunc=sanitizeSlant if sanitize else None, ) def planOpticalSizeAxis( glyphSetFunc, axisLimits, sizes=None, samples=None, glyphs=None, designLimits=None, pins=None, sanitize=False, ): """Plan a optical-size (`opsz`) axis. sizes: A list of optical size values to plan for. If None, the default values are used. This function simply calls planAxis with values=sizes, and the appropriate arguments. See documenation for planAxis for more information. 
""" if sizes is None: sizes = SIZES return planAxis( measureWeight, normalizeLog, interpolateLog, glyphSetFunc, "opsz", axisLimits, values=sizes, samples=samples, glyphs=glyphs, designLimits=designLimits, pins=pins, ) def makeDesignspaceSnippet(axisTag, axisName, axisLimit, mapping): """Make a designspace snippet for a single axis.""" designspaceSnippet = ( ' <axis tag="%s" name="%s" minimum="%g" default="%g" maximum="%g"' % ((axisTag, axisName) + axisLimit) ) if mapping: designspaceSnippet += ">\n" else: designspaceSnippet += "/>" for key, value in mapping.items(): designspaceSnippet += ' <map input="%g" output="%g"/>\n' % (key, value) if mapping: designspaceSnippet += " </axis>" return designspaceSnippet def addEmptyAvar(font): """Add an empty `avar` table to the font.""" font["avar"] = avar = newTable("avar") for axis in fvar.axes: avar.segments[axis.axisTag] = {} def processAxis( font, planFunc, axisTag, axisName, values, samples=None, glyphs=None, designLimits=None, pins=None, sanitize=False, plot=False, ): """Process a single axis.""" axisLimits = None for axis in font["fvar"].axes: if axis.axisTag == axisTag: axisLimits = axis break if axisLimits is None: return "" axisLimits = (axisLimits.minValue, axisLimits.defaultValue, axisLimits.maxValue) log.info("Planning %s axis.", axisName) if "avar" in font: existingMapping = font["avar"].segments[axisTag] font["avar"].segments[axisTag] = {} else: existingMapping = None if values is not None and isinstance(values, str): values = [float(w) for w in values.split()] if designLimits is not None and isinstance(designLimits, str): designLimits = [float(d) for d in options.designLimits.split(":")] assert ( len(designLimits) == 3 and designLimits[0] <= designLimits[1] <= designLimits[2] ) else: designLimits = None if pins is not None and isinstance(pins, str): newPins = {} for pin in pins.split(): before, after = pin.split(":") newPins[float(before)] = float(after) pins = newPins del newPins mapping, mappingNormalized = planFunc( font.getGlyphSet, axisLimits, values, samples=samples, glyphs=glyphs, designLimits=designLimits, pins=pins, sanitize=sanitize, ) if plot: from matplotlib import pyplot pyplot.plot( sorted(mappingNormalized), [mappingNormalized[k] for k in sorted(mappingNormalized)], ) pyplot.show() if existingMapping is not None: log.info("Existing %s mapping:\n%s", axisName, pformat(existingMapping)) if mapping: if "avar" not in font: addEmptyAvar(font) font["avar"].segments[axisTag] = mappingNormalized else: if "avar" in font: font["avar"].segments[axisTag] = {} designspaceSnippet = makeDesignspaceSnippet( axisTag, axisName, axisLimits, mapping, ) return designspaceSnippet def main(args=None): """Plan the standard axis mappings for a variable font""" if args is None: import sys args = sys.argv[1:] from fontTools import configLogger from fontTools.ttLib import TTFont import argparse parser = argparse.ArgumentParser( "fonttools varLib.avarPlanner", description="Plan `avar` table for variable font", ) parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.") parser.add_argument( "-o", "--output-file", type=str, help="Output font file name.", ) parser.add_argument( "--weights", type=str, help="Space-separate list of weights to generate." ) parser.add_argument( "--widths", type=str, help="Space-separate list of widths to generate." ) parser.add_argument( "--slants", type=str, help="Space-separate list of slants to generate." 
) parser.add_argument( "--sizes", type=str, help="Space-separate list of optical-sizes to generate." ) parser.add_argument("--samples", type=int, help="Number of samples.") parser.add_argument( "-s", "--sanitize", action="store_true", help="Sanitize axis limits" ) parser.add_argument( "-g", "--glyphs", type=str, help="Space-separate list of glyphs to use for sampling.", ) parser.add_argument( "--weight-design-limits", type=str, help="min:default:max in design units for the `wght` axis.", ) parser.add_argument( "--width-design-limits", type=str, help="min:default:max in design units for the `wdth` axis.", ) parser.add_argument( "--slant-design-limits", type=str, help="min:default:max in design units for the `slnt` axis.", ) parser.add_argument( "--optical-size-design-limits", type=str, help="min:default:max in design units for the `opsz` axis.", ) parser.add_argument( "--weight-pins", type=str, help="Space-separate list of before:after pins for the `wght` axis.", ) parser.add_argument( "--width-pins", type=str, help="Space-separate list of before:after pins for the `wdth` axis.", ) parser.add_argument( "--slant-pins", type=str, help="Space-separate list of before:after pins for the `slnt` axis.", ) parser.add_argument( "--optical-size-pins", type=str, help="Space-separate list of before:after pins for the `opsz` axis.", ) parser.add_argument( "-p", "--plot", action="store_true", help="Plot the resulting mapping." ) logging_group = parser.add_mutually_exclusive_group(required=False) logging_group.add_argument( "-v", "--verbose", action="store_true", help="Run more verbosely." ) logging_group.add_argument( "-q", "--quiet", action="store_true", help="Turn verbosity off." ) options = parser.parse_args(args) configLogger( level=("DEBUG" if options.verbose else "WARNING" if options.quiet else "INFO") ) font = TTFont(options.font) if not "fvar" in font: log.error("Not a variable font.") return 1 if options.glyphs is not None: glyphs = options.glyphs.split() if ":" in options.glyphs: glyphs = {} for g in options.glyphs.split(): if ":" in g: glyph, frequency = g.split(":") glyphs[glyph] = float(frequency) else: glyphs[g] = 1.0 else: glyphs = None designspaceSnippets = [] designspaceSnippets.append( processAxis( font, planWeightAxis, "wght", "Weight", values=options.weights, samples=options.samples, glyphs=glyphs, designLimits=options.weight_design_limits, pins=options.weight_pins, sanitize=options.sanitize, plot=options.plot, ) ) designspaceSnippets.append( processAxis( font, planWidthAxis, "wdth", "Width", values=options.widths, samples=options.samples, glyphs=glyphs, designLimits=options.width_design_limits, pins=options.width_pins, sanitize=options.sanitize, plot=options.plot, ) ) designspaceSnippets.append( processAxis( font, planSlantAxis, "slnt", "Slant", values=options.slants, samples=options.samples, glyphs=glyphs, designLimits=options.slant_design_limits, pins=options.slant_pins, sanitize=options.sanitize, plot=options.plot, ) ) designspaceSnippets.append( processAxis( font, planOpticalSizeAxis, "opsz", "OpticalSize", values=options.sizes, samples=options.samples, glyphs=glyphs, designLimits=options.optical_size_design_limits, pins=options.optical_size_pins, sanitize=options.sanitize, plot=options.plot, ) ) log.info("Designspace snippet:") for snippet in designspaceSnippets: if snippet: print(snippet) if options.output_file is None: outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar") else: outfile = options.output_file if outfile: log.info("Saving %s", outfile) 
font.save(outfile) if __name__ == "__main__": import sys sys.exit(main()) PKaZZZ�z���fontTools/varLib/builder.pyfrom fontTools import ttLib from fontTools.ttLib.tables import otTables as ot # VariationStore def buildVarRegionAxis(axisSupport): self = ot.VarRegionAxis() self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport] return self def buildVarRegion(support, axisTags): assert all(tag in axisTags for tag in support.keys()), ( "Unknown axis tag found.", support, axisTags, ) self = ot.VarRegion() self.VarRegionAxis = [] for tag in axisTags: self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0, 0, 0)))) return self def buildVarRegionList(supports, axisTags): self = ot.VarRegionList() self.RegionAxisCount = len(axisTags) self.Region = [] for support in supports: self.Region.append(buildVarRegion(support, axisTags)) self.RegionCount = len(self.Region) return self def _reorderItem(lst, mapping): return [lst[i] for i in mapping] def VarData_calculateNumShorts(self, optimize=False): count = self.VarRegionCount items = self.Item bit_lengths = [0] * count for item in items: # The "+ (i < -1)" magic is to handle two's-compliment. # That is, we want to get back 7 for -128, whereas # bit_length() returns 8. Similarly for -65536. # The reason "i < -1" is used instead of "i < 0" is that # the latter would make it return 0 for "-1" instead of 1. bl = [(i + (i < -1)).bit_length() for i in item] bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)] # The addition of 8, instead of seven, is to account for the sign bit. # This "((b + 8) >> 3) if b else 0" when combined with the above # "(i + (i < -1)).bit_length()" is a faster way to compute byte-lengths # conforming to: # # byte_length = (0 if i == 0 else # 1 if -128 <= i < 128 else # 2 if -65536 <= i < 65536 else # ...) byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths] # https://github.com/fonttools/fonttools/issues/2279 longWords = any(b > 2 for b in byte_lengths) if optimize: # Reorder columns such that wider columns come before narrower columns mapping = [] mapping.extend(i for i, b in enumerate(byte_lengths) if b > 2) mapping.extend(i for i, b in enumerate(byte_lengths) if b == 2) mapping.extend(i for i, b in enumerate(byte_lengths) if b == 1) byte_lengths = _reorderItem(byte_lengths, mapping) self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping) self.VarRegionCount = len(self.VarRegionIndex) for i in range(len(items)): items[i] = _reorderItem(items[i], mapping) if longWords: self.NumShorts = ( max((i for i, b in enumerate(byte_lengths) if b > 2), default=-1) + 1 ) self.NumShorts |= 0x8000 else: self.NumShorts = ( max((i for i, b in enumerate(byte_lengths) if b > 1), default=-1) + 1 ) self.VarRegionCount = len(self.VarRegionIndex) return self ot.VarData.calculateNumShorts = VarData_calculateNumShorts def VarData_CalculateNumShorts(self, optimize=True): """Deprecated name for VarData_calculateNumShorts() which defaults to optimize=True. 
Use varData.calculateNumShorts() or varData.optimize().""" return VarData_calculateNumShorts(self, optimize=optimize) def VarData_optimize(self): return VarData_calculateNumShorts(self, optimize=True) ot.VarData.optimize = VarData_optimize def buildVarData(varRegionIndices, items, optimize=True): self = ot.VarData() self.VarRegionIndex = list(varRegionIndices) regionCount = self.VarRegionCount = len(self.VarRegionIndex) records = self.Item = [] if items: for item in items: assert len(item) == regionCount records.append(list(item)) self.ItemCount = len(self.Item) self.calculateNumShorts(optimize=optimize) return self def buildVarStore(varRegionList, varDataList): self = ot.VarStore() self.Format = 1 self.VarRegionList = varRegionList self.VarData = list(varDataList) self.VarDataCount = len(self.VarData) return self # Variation helpers def buildVarIdxMap(varIdxes, glyphOrder): self = ot.VarIdxMap() self.mapping = {g: v for g, v in zip(glyphOrder, varIdxes)} return self def buildDeltaSetIndexMap(varIdxes): mapping = list(varIdxes) if all(i == v for i, v in enumerate(mapping)): return None self = ot.DeltaSetIndexMap() self.mapping = mapping self.Format = 1 if len(mapping) > 0xFFFF else 0 return self def buildVarDevTable(varIdx): self = ot.Device() self.DeltaFormat = 0x8000 self.StartSize = varIdx >> 16 self.EndSize = varIdx & 0xFFFF return self PKaZZZ��� f ffontTools/varLib/cff.pyfrom collections import namedtuple from fontTools.cffLib import ( maxStackLimit, TopDictIndex, buildOrder, topDictOperators, topDictOperators2, privateDictOperators, privateDictOperators2, FDArrayIndex, FontDict, VarStoreData, ) from io import BytesIO from fontTools.cffLib.specializer import specializeCommands, commandsToProgram from fontTools.ttLib import newTable from fontTools import varLib from fontTools.varLib.models import allEqual from fontTools.misc.roundTools import roundFunc from fontTools.misc.psCharStrings import T2CharString, T2OutlineExtractor from fontTools.pens.t2CharStringPen import T2CharStringPen from functools import partial from .errors import ( VarLibCFFDictMergeError, VarLibCFFPointTypeMergeError, VarLibCFFHintTypeMergeError, VarLibMergeError, ) # Backwards compatibility MergeDictError = VarLibCFFDictMergeError MergeTypeError = VarLibCFFPointTypeMergeError def addCFFVarStore(varFont, varModel, varDataList, masterSupports): fvarTable = varFont["fvar"] axisKeys = [axis.axisTag for axis in fvarTable.axes] varTupleList = varLib.builder.buildVarRegionList(masterSupports, axisKeys) varStoreCFFV = varLib.builder.buildVarStore(varTupleList, varDataList) topDict = varFont["CFF2"].cff.topDictIndex[0] topDict.VarStore = VarStoreData(otVarStore=varStoreCFFV) if topDict.FDArray[0].vstore is None: fdArray = topDict.FDArray for fontDict in fdArray: if hasattr(fontDict, "Private"): fontDict.Private.vstore = topDict.VarStore def lib_convertCFFToCFF2(cff, otFont): # This assumes a decompiled CFF table. 
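# Rough outline of the steps below: re-wrap the top dict with the CFF2
# operator order, synthesize an FDArray (moving the PrivateDict into it)
# when the font has none, strip operators that are not valid in CFF2, and
# finally compile and decompile once so the charstrings are re-parsed as
# CFF2 charstrings.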
cff2GetGlyphOrder = cff.otFont.getGlyphOrder topDictData = TopDictIndex(None, cff2GetGlyphOrder, None) topDictData.items = cff.topDictIndex.items cff.topDictIndex = topDictData topDict = topDictData[0] if hasattr(topDict, "Private"): privateDict = topDict.Private else: privateDict = None opOrder = buildOrder(topDictOperators2) topDict.order = opOrder topDict.cff2GetGlyphOrder = cff2GetGlyphOrder if not hasattr(topDict, "FDArray"): fdArray = topDict.FDArray = FDArrayIndex() fdArray.strings = None fdArray.GlobalSubrs = topDict.GlobalSubrs topDict.GlobalSubrs.fdArray = fdArray charStrings = topDict.CharStrings if charStrings.charStringsAreIndexed: charStrings.charStringsIndex.fdArray = fdArray else: charStrings.fdArray = fdArray fontDict = FontDict() fontDict.setCFF2(True) fdArray.append(fontDict) fontDict.Private = privateDict privateOpOrder = buildOrder(privateDictOperators2) if privateDict is not None: for entry in privateDictOperators: key = entry[1] if key not in privateOpOrder: if key in privateDict.rawDict: # print "Removing private dict", key del privateDict.rawDict[key] if hasattr(privateDict, key): delattr(privateDict, key) # print "Removing privateDict attr", key else: # clean up the PrivateDicts in the fdArray fdArray = topDict.FDArray privateOpOrder = buildOrder(privateDictOperators2) for fontDict in fdArray: fontDict.setCFF2(True) for key in list(fontDict.rawDict.keys()): if key not in fontDict.order: del fontDict.rawDict[key] if hasattr(fontDict, key): delattr(fontDict, key) privateDict = fontDict.Private for entry in privateDictOperators: key = entry[1] if key not in privateOpOrder: if key in privateDict.rawDict: # print "Removing private dict", key del privateDict.rawDict[key] if hasattr(privateDict, key): delattr(privateDict, key) # print "Removing privateDict attr", key # Now delete up the deprecated topDict operators from CFF 1.0 for entry in topDictOperators: key = entry[1] if key not in opOrder: if key in topDict.rawDict: del topDict.rawDict[key] if hasattr(topDict, key): delattr(topDict, key) # At this point, the Subrs and Charstrings are all still T2Charstring class # easiest to fix this by compiling, then decompiling again cff.major = 2 file = BytesIO() cff.compile(file, otFont, isCFF2=True) file.seek(0) cff.decompile(file, otFont, isCFF2=True) def convertCFFtoCFF2(varFont): # Convert base font to a single master CFF2 font. cffTable = varFont["CFF "] lib_convertCFFToCFF2(cffTable.cff, varFont) newCFF2 = newTable("CFF2") newCFF2.cff = cffTable.cff varFont["CFF2"] = newCFF2 del varFont["CFF "] def conv_to_int(num): if isinstance(num, float) and num.is_integer(): return int(num) return num pd_blend_fields = ( "BlueValues", "OtherBlues", "FamilyBlues", "FamilyOtherBlues", "BlueScale", "BlueShift", "BlueFuzz", "StdHW", "StdVW", "StemSnapH", "StemSnapV", ) def get_private(regionFDArrays, fd_index, ri, fd_map): region_fdArray = regionFDArrays[ri] region_fd_map = fd_map[fd_index] if ri in region_fd_map: region_fdIndex = region_fd_map[ri] private = region_fdArray[region_fdIndex].Private else: private = None return private def merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map): """ I step through the FontDicts in the FDArray of the varfont TopDict. For each varfont FontDict: * step through each key in FontDict.Private. * For each key, step through each relevant source font Private dict, and build a list of values to blend. The 'relevant' source fonts are selected by first getting the right submodel using ``vsindex_dict[vsindex]``. 
The indices of the ``subModel.locations`` are mapped to source font list indices by assuming the latter order is the same as the order of the ``var_model.locations``. I can then get the index of each subModel location in the list of ``var_model.locations``. """ topDict = top_dicts[0] region_top_dicts = top_dicts[1:] if hasattr(region_top_dicts[0], "FDArray"): regionFDArrays = [fdTopDict.FDArray for fdTopDict in region_top_dicts] else: regionFDArrays = [[fdTopDict] for fdTopDict in region_top_dicts] for fd_index, font_dict in enumerate(topDict.FDArray): private_dict = font_dict.Private vsindex = getattr(private_dict, "vsindex", 0) # At the moment, no PrivateDict has a vsindex key, but let's support # how it should work. See comment at end of # merge_charstrings() - still need to optimize use of vsindex. sub_model, _ = vsindex_dict[vsindex] master_indices = [] for loc in sub_model.locations[1:]: i = var_model.locations.index(loc) - 1 master_indices.append(i) pds = [private_dict] last_pd = private_dict for ri in master_indices: pd = get_private(regionFDArrays, fd_index, ri, fd_map) # If the region font doesn't have this FontDict, just reference # the last one used. if pd is None: pd = last_pd else: last_pd = pd pds.append(pd) num_masters = len(pds) for key, value in private_dict.rawDict.items(): dataList = [] if key not in pd_blend_fields: continue if isinstance(value, list): try: values = [pd.rawDict[key] for pd in pds] except KeyError: print( "Warning: {key} in default font Private dict is " "missing from another font, and was " "discarded.".format(key=key) ) continue try: values = zip(*values) except IndexError: raise VarLibCFFDictMergeError(key, value, values) """ Row 0 contains the first value from each master. Convert each row from absolute values to relative values from the previous row. e.g. for three masters, a list of values was: master 0 OtherBlues = [-217,-205] master 1 OtherBlues = [-234,-222] master 2 OtherBlues = [-188,-176] The call to zip() converts this to: [(-217, -234, -188), (-205, -222, -176)] and is converted finally to: OtherBlues = [[-217, 17.0, 46.0], [-205, 0.0, 0.0]] """ prev_val_list = [0] * num_masters any_points_differ = False for val_list in values: rel_list = [ (val - prev_val_list[i]) for (i, val) in enumerate(val_list) ] if (not any_points_differ) and not allEqual(rel_list): any_points_differ = True prev_val_list = val_list deltas = sub_model.getDeltas(rel_list) # For PrivateDict BlueValues, the default font # values are absolute, not relative to the prior value. deltas[0] = val_list[0] dataList.append(deltas) # If there are no blend values, then # we can collapse the blend lists. if not any_points_differ: dataList = [data[0] for data in dataList] else: values = [pd.rawDict[key] for pd in pds] if not allEqual(values): dataList = sub_model.getDeltas(values) else: dataList = values[0] # Convert numbers with no decimal part to an int if isinstance(dataList, list): for i, item in enumerate(dataList): if isinstance(item, list): for j, jtem in enumerate(item): dataList[i][j] = conv_to_int(jtem) else: dataList[i] = conv_to_int(item) else: dataList = conv_to_int(dataList) private_dict.rawDict[key] = dataList def _cff_or_cff2(font): if "CFF " in font: return font["CFF "] return font["CFF2"] def getfd_map(varFont, fonts_list): """Since a subset source font may have fewer FontDicts in its FDArray than the default font, we have to match up the FontDicts in the different fonts.
We do this with the FDSelect array, and by assuming that the same glyph will reference matching FontDicts in each source font. We return a mapping from fdIndex in the default font to a dictionary which maps each master list index of each region font to the equivalent fdIndex in the region font.""" fd_map = {} default_font = fonts_list[0] region_fonts = fonts_list[1:] num_regions = len(region_fonts) topDict = _cff_or_cff2(default_font).cff.topDictIndex[0] if not hasattr(topDict, "FDSelect"): # All glyphs reference only one FontDict. # Map the FD index for regions to index 0. fd_map[0] = {ri: 0 for ri in range(num_regions)} return fd_map gname_mapping = {} default_fdSelect = topDict.FDSelect glyphOrder = default_font.getGlyphOrder() for gid, fdIndex in enumerate(default_fdSelect): gname_mapping[glyphOrder[gid]] = fdIndex if fdIndex not in fd_map: fd_map[fdIndex] = {} for ri, region_font in enumerate(region_fonts): region_glyphOrder = region_font.getGlyphOrder() region_topDict = _cff_or_cff2(region_font).cff.topDictIndex[0] if not hasattr(region_topDict, "FDSelect"): # All the glyphs share the same FontDict. Pick any glyph. default_fdIndex = gname_mapping[region_glyphOrder[0]] fd_map[default_fdIndex][ri] = 0 else: region_fdSelect = region_topDict.FDSelect for gid, fdIndex in enumerate(region_fdSelect): default_fdIndex = gname_mapping[region_glyphOrder[gid]] region_map = fd_map[default_fdIndex] if ri not in region_map: region_map[ri] = fdIndex return fd_map CVarData = namedtuple("CVarData", "varDataList masterSupports vsindex_dict") def merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder): topDict = varFont["CFF2"].cff.topDictIndex[0] top_dicts = [topDict] + [ _cff_or_cff2(ttFont).cff.topDictIndex[0] for ttFont in ordered_fonts_list[1:] ] num_masters = len(model.mapping) cvData = merge_charstrings(glyphOrder, num_masters, top_dicts, model) fd_map = getfd_map(varFont, ordered_fonts_list) merge_PrivateDicts(top_dicts, cvData.vsindex_dict, model, fd_map) addCFFVarStore(varFont, model, cvData.varDataList, cvData.masterSupports) def _get_cs(charstrings, glyphName, filterEmpty=False): if glyphName not in charstrings: return None cs = charstrings[glyphName] if filterEmpty: cs.decompile() if cs.program == []: # CFF2 empty charstring return None elif ( len(cs.program) <= 2 and cs.program[-1] == "endchar" and (len(cs.program) == 1 or type(cs.program[0]) in (int, float)) ): # CFF1 empty charstring return None return cs def _add_new_vsindex( model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList ): varTupleIndexes = [] for support in model.supports[1:]: if support not in masterSupports: masterSupports.append(support) varTupleIndexes.append(masterSupports.index(support)) var_data = varLib.builder.buildVarData(varTupleIndexes, None, False) vsindex = len(vsindex_dict) vsindex_by_key[key] = vsindex vsindex_dict[vsindex] = (model, [key]) varDataList.append(var_data) return vsindex def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel): vsindex_dict = {} vsindex_by_key = {} varDataList = [] masterSupports = [] default_charstrings = top_dicts[0].CharStrings for gid, gname in enumerate(glyphOrder): # interpret empty non-default masters as missing glyphs from a sparse master all_cs = [ _get_cs(td.CharStrings, gname, i != 0) for i, td in enumerate(top_dicts) ] model, model_cs = masterModel.getSubModel(all_cs) # create the first pass CFF2 charstring, from # the default charstring. 
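# The default master establishes the command structure; each region master
# is then drawn through the same pen, appending its coordinates to
# per-command blend lists (see CFF2CharStringMergePen.restart() and
# add_point() below).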
default_charstring = model_cs[0] var_pen = CFF2CharStringMergePen([], gname, num_masters, 0) # We need to override outlineExtractor because these # charstrings do have widths in the 'program'; we need to drop these # values rather than raise an assertion error for them. default_charstring.outlineExtractor = MergeOutlineExtractor default_charstring.draw(var_pen) # Add the coordinates from all the other regions to the # blend lists in the CFF2 charstring. region_cs = model_cs[1:] for region_idx, region_charstring in enumerate(region_cs, start=1): var_pen.restart(region_idx) region_charstring.outlineExtractor = MergeOutlineExtractor region_charstring.draw(var_pen) # Collapse each coordinate list to a blend operator and its args. new_cs = var_pen.getCharString( private=default_charstring.private, globalSubrs=default_charstring.globalSubrs, var_model=model, optimize=True, ) default_charstrings[gname] = new_cs if not region_cs: continue if (not var_pen.seen_moveto) or ("blend" not in new_cs.program): # If this is not a marking glyph, or if there are no blend # arguments, then we can use vsindex 0. No need to # check if we need a new vsindex. continue # If the charstring required a new model, create # a VarData table to go with it, and set vsindex. key = tuple(v is not None for v in all_cs) try: vsindex = vsindex_by_key[key] except KeyError: vsindex = _add_new_vsindex( model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList ) # We do not need to check for an existing new_cs.private.vsindex, # as we know it doesn't exist yet. if vsindex != 0: new_cs.program[:0] = [vsindex, "vsindex"] # If there is no variation in any of the charstrings, then vsindex_dict # never gets built. This could still be needed if there is variation # in the PrivateDict, so we will build the default data for vsindex = 0.
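# Sketch of the bookkeeping above: each distinct sparseness pattern, i.e.
# which masters actually contain the glyph, gets its own VarData/vsindex,
# so all charstrings sharing that pattern share one sub-model.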
if not vsindex_dict: key = (True,) * num_masters _add_new_vsindex( masterModel, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList ) cvData = CVarData( varDataList=varDataList, masterSupports=masterSupports, vsindex_dict=vsindex_dict, ) # XXX To do: optimize use of vsindex between the PrivateDicts and # charstrings return cvData class CFFToCFF2OutlineExtractor(T2OutlineExtractor): """This class is used to remove the initial width from the CFF charstring without trying to add the width to self.nominalWidthX, which is None.""" def popallWidth(self, evenOdd=0): args = self.popall() if not self.gotWidth: if evenOdd ^ (len(args) % 2): args = args[1:] self.width = self.defaultWidthX self.gotWidth = 1 return args class MergeOutlineExtractor(CFFToCFF2OutlineExtractor): """Used to extract the charstring commands - including hints - from a CFF charstring in order to merge it as another set of region data into a CFF2 variable font charstring.""" def __init__( self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None, blender=None, ): super().__init__( pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private, blender ) def countHints(self): args = self.popallWidth() self.hintCount = self.hintCount + len(args) // 2 return args def _hint_op(self, type, args): self.pen.add_hint(type, args) def op_hstem(self, index): args = self.countHints() self._hint_op("hstem", args) def op_vstem(self, index): args = self.countHints() self._hint_op("vstem", args) def op_hstemhm(self, index): args = self.countHints() self._hint_op("hstemhm", args) def op_vstemhm(self, index): args = self.countHints() self._hint_op("vstemhm", args) def _get_hintmask(self, index): if not self.hintMaskBytes: args = self.countHints() if args: self._hint_op("vstemhm", args) self.hintMaskBytes = (self.hintCount + 7) // 8 hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes) return index, hintMaskBytes def op_hintmask(self, index): index, hintMaskBytes = self._get_hintmask(index) self.pen.add_hintmask("hintmask", [hintMaskBytes]) return hintMaskBytes, index def op_cntrmask(self, index): index, hintMaskBytes = self._get_hintmask(index) self.pen.add_hintmask("cntrmask", [hintMaskBytes]) return hintMaskBytes, index class CFF2CharStringMergePen(T2CharStringPen): """Pen to merge Type 2 CharStrings.""" def __init__( self, default_commands, glyphName, num_masters, master_idx, roundTolerance=0.01 ): # For roundTolerance see https://github.com/fonttools/fonttools/issues/2838 super().__init__( width=None, glyphSet=None, CFF2=True, roundTolerance=roundTolerance ) self.pt_index = 0 self._commands = default_commands self.m_index = master_idx self.num_masters = num_masters self.prev_move_idx = 0 self.seen_moveto = False self.glyphName = glyphName self.round = roundFunc(roundTolerance, round=round) def add_point(self, point_type, pt_coords): if self.m_index == 0: self._commands.append([point_type, [pt_coords]]) else: cmd = self._commands[self.pt_index] if cmd[0] != point_type: raise VarLibCFFPointTypeMergeError( point_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName ) cmd[1].append(pt_coords) self.pt_index += 1 def add_hint(self, hint_type, args): if self.m_index == 0: self._commands.append([hint_type, [args]]) else: cmd = self._commands[self.pt_index] if cmd[0] != hint_type: raise VarLibCFFHintTypeMergeError( hint_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName ) cmd[1].append(args) self.pt_index += 1 def add_hintmask(self, hint_type, abs_args): # For hintmask, 
fonttools.cffLib.specializer.py expects # each of these to be represented by two sequential commands: # first holding only the operator name, with an empty arg list, # second with an empty string as the op name, and the mask arg list. if self.m_index == 0: self._commands.append([hint_type, []]) self._commands.append(["", [abs_args]]) else: cmd = self._commands[self.pt_index] if cmd[0] != hint_type: raise VarLibCFFHintTypeMergeError( hint_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName ) self.pt_index += 1 cmd = self._commands[self.pt_index] cmd[1].append(abs_args) self.pt_index += 1 def _moveTo(self, pt): if not self.seen_moveto: self.seen_moveto = True pt_coords = self._p(pt) self.add_point("rmoveto", pt_coords) # I set prev_move_idx here because add_point() # can change self.pt_index. self.prev_move_idx = self.pt_index - 1 def _lineTo(self, pt): pt_coords = self._p(pt) self.add_point("rlineto", pt_coords) def _curveToOne(self, pt1, pt2, pt3): _p = self._p pt_coords = _p(pt1) + _p(pt2) + _p(pt3) self.add_point("rrcurveto", pt_coords) def _closePath(self): pass def _endPath(self): pass def restart(self, region_idx): self.pt_index = 0 self.m_index = region_idx self._p0 = (0, 0) def getCommands(self): return self._commands def reorder_blend_args(self, commands, get_delta_func): """ We first re-order the master coordinate values. For a moveto or lineto, the args are now arranged as:: [ [master_0 x,y], [master_1 x,y], [master_2 x,y] ] We re-arrange this to:: [ [master_0 x, master_1 x, master_2 x], [master_0 y, master_1 y, master_2 y] ] If the master values are all the same, we collapse the list to a single value instead of a list. We then convert this to:: [ [master_0 x] + [x delta tuple] + [numBlends=1] [master_0 y] + [y delta tuple] + [numBlends=1] ] """ for cmd in commands: # arg[i] is the set of arguments for this operator from master i. args = cmd[1] m_args = zip(*args) # m_args[n] is now all num_master args for the n'th argument # for this operation. cmd[1] = list(m_args) lastOp = None for cmd in commands: op = cmd[0] # masks are represented by two cmd's: first has only op names, # second has only args. if lastOp in ["hintmask", "cntrmask"]: coord = list(cmd[1]) if not allEqual(coord): raise VarLibMergeError( "Hintmask values cannot differ between source fonts."
) cmd[1] = [coord[0][0]] else: coords = cmd[1] new_coords = [] for coord in coords: if allEqual(coord): new_coords.append(coord[0]) else: # convert to deltas deltas = get_delta_func(coord)[1:] coord = [coord[0]] + deltas coord.append(1) new_coords.append(coord) cmd[1] = new_coords lastOp = op return commands def getCharString( self, private=None, globalSubrs=None, var_model=None, optimize=True ): commands = self._commands commands = self.reorder_blend_args( commands, partial(var_model.getDeltas, round=self.round) ) if optimize: commands = specializeCommands( commands, generalizeFirst=False, maxstack=maxStackLimit ) program = commandsToProgram(commands) charString = T2CharString( program=program, private=private, globalSubrs=globalSubrs ) return charString PKaZZZ6Q"�fontTools/varLib/errors.pyimport textwrap class VarLibError(Exception): """Base exception for the varLib module.""" class VarLibValidationError(VarLibError): """Raised when input data is invalid from varLib's point of view.""" class VarLibMergeError(VarLibError): """Raised when input data cannot be merged into a variable font.""" def __init__(self, merger=None, **kwargs): self.merger = merger if not kwargs: kwargs = {} if "stack" in kwargs: self.stack = kwargs["stack"] del kwargs["stack"] else: self.stack = [] self.cause = kwargs @property def reason(self): return self.__doc__ def _master_name(self, ix): if self.merger is not None: ttf = self.merger.ttfs[ix] if "name" in ttf and ttf["name"].getBestFullName(): return ttf["name"].getBestFullName() elif hasattr(ttf.reader, "file") and hasattr(ttf.reader.file, "name"): return ttf.reader.file.name return f"master number {ix}" @property def offender(self): if "expected" in self.cause and "got" in self.cause: index = [x == self.cause["expected"] for x in self.cause["got"]].index( False ) master_name = self._master_name(index) if "location" in self.cause: master_name = f"{master_name} ({self.cause['location']})" return index, master_name return None, None @property def details(self): if "expected" in self.cause and "got" in self.cause: offender_index, offender = self.offender got = self.cause["got"][offender_index] return f"Expected to see {self.stack[0]}=={self.cause['expected']!r}, instead saw {got!r}\n" return "" def __str__(self): offender_index, offender = self.offender location = "" if offender: location = f"\n\nThe problem is likely to be in {offender}:\n" context = "".join(reversed(self.stack)) basic = textwrap.fill( f"Couldn't merge the fonts, because {self.reason}. 
" f"This happened while performing the following operation: {context}", width=78, ) return "\n\n" + basic + location + self.details class ShouldBeConstant(VarLibMergeError): """some values were different, but should have been the same""" @property def details(self): basic_message = super().details if self.stack[0] != ".FeatureCount" or self.merger is None: return basic_message assert self.stack[0] == ".FeatureCount" offender_index, _ = self.offender bad_ttf = self.merger.ttfs[offender_index] good_ttf = next( ttf for ttf in self.merger.ttfs if self.stack[-1] in ttf and ttf[self.stack[-1]].table.FeatureList.FeatureCount == self.cause["expected"] ) good_features = [ x.FeatureTag for x in good_ttf[self.stack[-1]].table.FeatureList.FeatureRecord ] bad_features = [ x.FeatureTag for x in bad_ttf[self.stack[-1]].table.FeatureList.FeatureRecord ] return basic_message + ( "\nIncompatible features between masters.\n" f"Expected: {', '.join(good_features)}.\n" f"Got: {', '.join(bad_features)}.\n" ) class FoundANone(VarLibMergeError): """one of the values in a list was empty when it shouldn't have been""" @property def offender(self): index = [x is None for x in self.cause["got"]].index(True) return index, self._master_name(index) @property def details(self): cause, stack = self.cause, self.stack return f"{stack[0]}=={cause['got']}\n" class NotANone(VarLibMergeError): """one of the values in a list was not empty when it should have been""" @property def offender(self): index = [x is not None for x in self.cause["got"]].index(True) return index, self._master_name(index) @property def details(self): cause, stack = self.cause, self.stack return f"{stack[0]}=={cause['got']}\n" class MismatchedTypes(VarLibMergeError): """data had inconsistent types""" class LengthsDiffer(VarLibMergeError): """a list of objects had inconsistent lengths""" class KeysDiffer(VarLibMergeError): """a list of objects had different keys""" class InconsistentGlyphOrder(VarLibMergeError): """the glyph order was inconsistent between masters""" class InconsistentExtensions(VarLibMergeError): """the masters use extension lookups in inconsistent ways""" class UnsupportedFormat(VarLibMergeError): """an OpenType subtable (%s) had a format I didn't expect""" def __init__(self, merger=None, **kwargs): super().__init__(merger, **kwargs) if not self.stack: self.stack = [".Format"] @property def reason(self): s = self.__doc__ % self.cause["subtable"] if "value" in self.cause: s += f" ({self.cause['value']!r})" return s class InconsistentFormats(UnsupportedFormat): """an OpenType subtable (%s) had inconsistent formats between masters""" class VarLibCFFMergeError(VarLibError): pass class VarLibCFFDictMergeError(VarLibCFFMergeError): """Raised when a CFF PrivateDict cannot be merged.""" def __init__(self, key, value, values): error_msg = ( f"For the Private Dict key '{key}', the default font value list:" f"\n\t{value}\nhad a different number of values than a region font:" ) for region_value in values: error_msg += f"\n\t{region_value}" self.args = (error_msg,) class VarLibCFFPointTypeMergeError(VarLibCFFMergeError): """Raised when a CFF glyph cannot be merged because of point type differences.""" def __init__(self, point_type, pt_index, m_index, default_type, glyph_name): error_msg = ( f"Glyph '{glyph_name}': '{point_type}' at point index {pt_index} in " f"master index {m_index} differs from the default font point type " f"'{default_type}'" ) self.args = (error_msg,) class VarLibCFFHintTypeMergeError(VarLibCFFMergeError): """Raised when a CFF 
glyph cannot be merged because of hint type differences.""" def __init__(self, hint_type, cmd_index, m_index, default_type, glyph_name): error_msg = ( f"Glyph '{glyph_name}': '{hint_type}' at index {cmd_index} in " f"master index {m_index} differs from the default font hint type " f"'{default_type}'" ) self.args = (error_msg,) class VariationModelError(VarLibError): """Raised when a variation model is faulty.""" fontTools/varLib/featureVars.py """Module to build FeatureVariation tables: https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#featurevariations-table NOTE: The API is experimental and subject to change. """ from fontTools.misc.dictTools import hashdict from fontTools.misc.intTools import bit_count from fontTools.ttLib import newTable from fontTools.ttLib.tables import otTables as ot from fontTools.ttLib.ttVisitor import TTVisitor from fontTools.otlLib.builder import buildLookup, buildSingleSubstSubtable from collections import OrderedDict from .errors import VarLibError, VarLibValidationError def addFeatureVariations(font, conditionalSubstitutions, featureTag="rvrn"): """Add conditional substitutions to a Variable Font. The `conditionalSubstitutions` argument is a list of (Region, Substitutions) tuples. A Region is a list of Boxes. A Box is a dict mapping axisTags to (minValue, maxValue) tuples. Irrelevant axes may be omitted, in which case they are interpreted as extending to the end of the axis in each direction. A Box represents an orthogonal 'rectangular' subset of an N-dimensional design space. A Region represents a more complex subset of an N-dimensional design space, i.e. the union of all the Boxes in the Region. For efficiency, Boxes within a Region should ideally not overlap, but functionality is not compromised if they do. The minimum and maximum values are expressed in normalized coordinates. A Substitution is a dict mapping source glyph names to substitute glyph names. Example: # >>> f = TTFont(srcPath) # >>> condSubst = [ # ... # A list of (Region, Substitution) tuples. # ... ([{"wdth": (0.5, 1.0)}], {"cent": "cent.rvrn"}), # ... ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}), # ... ] # >>> addFeatureVariations(f, condSubst) # >>> f.save(dstPath) The `featureTag` parameter takes either a str or an iterable of str (the single str is kept for backwards compatibility), and defines which feature(s) will be associated with the feature variations. Note: if this is "rvrn", the substitution lookup will be inserted at the beginning of the lookup list so that it is processed before others; for any other feature tag it will be appended last.
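For example, to attach the same variations to several features at once (a sketch reusing the `f` and `condSubst` from the commented example above; the tags are illustrative): # >>> addFeatureVariations(f, condSubst, featureTag=["rclt", "rvrn"]) Since "rvrn" is not the only tag listed here, the new lookups are appended at the end of the lookup list rather than inserted first.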
""" # process first when "rvrn" is the only listed tag featureTags = [featureTag] if isinstance(featureTag, str) else sorted(featureTag) processLast = "rvrn" not in featureTags or len(featureTags) > 1 _checkSubstitutionGlyphsExist( glyphNames=set(font.getGlyphOrder()), substitutions=conditionalSubstitutions, ) substitutions = overlayFeatureVariations(conditionalSubstitutions) # turn substitution dicts into tuples of tuples, so they are hashable conditionalSubstitutions, allSubstitutions = makeSubstitutionsHashable( substitutions ) if "GSUB" not in font: font["GSUB"] = buildGSUB() else: existingTags = _existingVariableFeatures(font["GSUB"].table).intersection( featureTags ) if existingTags: raise VarLibError( f"FeatureVariations already exist for feature tag(s): {existingTags}" ) # setup lookups lookupMap = buildSubstitutionLookups( font["GSUB"].table, allSubstitutions, processLast ) # addFeatureVariationsRaw takes a list of # ( {condition}, [ lookup indices ] ) # so rearrange our lookups to match conditionsAndLookups = [] for conditionSet, substitutions in conditionalSubstitutions: conditionsAndLookups.append( (conditionSet, [lookupMap[s] for s in substitutions]) ) addFeatureVariationsRaw(font, font["GSUB"].table, conditionsAndLookups, featureTags) def _existingVariableFeatures(table): existingFeatureVarsTags = set() if hasattr(table, "FeatureVariations") and table.FeatureVariations is not None: features = table.FeatureList.FeatureRecord for fvr in table.FeatureVariations.FeatureVariationRecord: for ftsr in fvr.FeatureTableSubstitution.SubstitutionRecord: existingFeatureVarsTags.add(features[ftsr.FeatureIndex].FeatureTag) return existingFeatureVarsTags def _checkSubstitutionGlyphsExist(glyphNames, substitutions): referencedGlyphNames = set() for _, substitution in substitutions: referencedGlyphNames |= substitution.keys() referencedGlyphNames |= set(substitution.values()) missing = referencedGlyphNames - glyphNames if missing: raise VarLibValidationError( "Missing glyphs are referenced in conditional substitution rules:" f" {', '.join(missing)}" ) def overlayFeatureVariations(conditionalSubstitutions): """Compute overlaps between all conditional substitutions. The `conditionalSubstitutions` argument is a list of (Region, Substitutions) tuples. A Region is a list of Boxes. A Box is a dict mapping axisTags to (minValue, maxValue) tuples. Irrelevant axes may be omitted and they are interpretted as extending to end of axis in each direction. A Box represents an orthogonal 'rectangular' subset of an N-dimensional design space. A Region represents a more complex subset of an N-dimensional design space, ie. the union of all the Boxes in the Region. For efficiency, Boxes within a Region should ideally not overlap, but functionality is not compromised if they do. The minimum and maximum values are expressed in normalized coordinates. A Substitution is a dict mapping source glyph names to substitute glyph names. Returns data is in similar but different format. Overlaps of distinct substitution Boxes (*not* Regions) are explicitly listed as distinct rules, and rules with the same Box merged. The more specific rules appear earlier in the resulting list. Moreover, instead of just a dictionary of substitutions, a list of dictionaries is returned for substitutions corresponding to each unique space, with each dictionary being identical to one of the input substitution dictionaries. These dictionaries are not merged to allow data sharing when they are converted into font tables. 
Example:: >>> condSubst = [ ... # A list of (Region, Substitution) tuples. ... ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}), ... ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}), ... ([{"wdth": (0.5, 1.0)}], {"cent": "cent.rvrn"}), ... ([{"wght": (0.5, 1.0), "wdth": (-1, 1.0)}], {"dollar": "dollar.rvrn"}), ... ] >>> from pprint import pprint >>> pprint(overlayFeatureVariations(condSubst)) [({'wdth': (0.5, 1.0), 'wght': (0.5, 1.0)}, [{'dollar': 'dollar.rvrn'}, {'cent': 'cent.rvrn'}]), ({'wdth': (0.5, 1.0)}, [{'cent': 'cent.rvrn'}]), ({'wght': (0.5, 1.0)}, [{'dollar': 'dollar.rvrn'}])] """ # Merge same-substitution rules, as this creates fewer lookups. merged = OrderedDict() for value, key in conditionalSubstitutions: key = hashdict(key) if key in merged: merged[key].extend(value) else: merged[key] = value conditionalSubstitutions = [(v, dict(k)) for k, v in merged.items()] del merged # Merge same-region rules, as this is cheaper. # Also convert boxes to hashdict() # # Reversing is such that earlier entries win in case of conflicting substitution # rules for the same region. merged = OrderedDict() for key, value in reversed(conditionalSubstitutions): key = tuple( sorted( (hashdict(cleanupBox(k)) for k in key), key=lambda d: tuple(sorted(d.items())), ) ) if key in merged: merged[key].update(value) else: merged[key] = dict(value) conditionalSubstitutions = list(reversed(merged.items())) del merged # Overlay # # Rank is the bit-set of the index of all contributing layers. initMapInit = ((hashdict(), 0),) # Initializer representing the entire space boxMap = OrderedDict(initMapInit) # Map from Box to Rank for i, (currRegion, _) in enumerate(conditionalSubstitutions): newMap = OrderedDict(initMapInit) currRank = 1 << i for box, rank in boxMap.items(): for currBox in currRegion: intersection, remainder = overlayBox(currBox, box) if intersection is not None: intersection = hashdict(intersection) newMap[intersection] = newMap.get(intersection, 0) | rank | currRank if remainder is not None: remainder = hashdict(remainder) newMap[remainder] = newMap.get(remainder, 0) | rank boxMap = newMap # Generate output items = [] for box, rank in sorted( boxMap.items(), key=(lambda BoxAndRank: -bit_count(BoxAndRank[1])) ): # Skip any box that doesn't have any substitution. if rank == 0: continue substsList = [] i = 0 while rank: if rank & 1: substsList.append(conditionalSubstitutions[i][1]) rank >>= 1 i += 1 items.append((dict(box), substsList)) return items # # Terminology: # # A 'Box' is a dict representing an orthogonal "rectangular" bit of N-dimensional space. # The keys in the dict are axis tags, the values are (minValue, maxValue) tuples. # Missing dimensions (keys) are substituted by the default min and max values # from the corresponding axes. # def overlayBox(top, bot): """Overlays ``top`` box on top of ``bot`` box. Returns two items: * Box for intersection of ``top`` and ``bot``, or None if they don't intersect. * Box for remainder of ``bot``. Remainder box might not be exact (since the remainder might not be a simple box), but is inclusive of the exact remainder. """ # Intersection intersection = {} intersection.update(top) intersection.update(bot) for axisTag in set(top) & set(bot): min1, max1 = top[axisTag] min2, max2 = bot[axisTag] minimum = max(min1, min2) maximum = min(max1, max2) if not minimum < maximum: return None, bot # Do not intersect intersection[axisTag] = minimum, maximum # Remainder # # Remainder is empty if each axis range of bot lies within that of the intersection.
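# (For example: top={'wght': (0.3, 0.7)} and bot={'wght': (0.5, 1.0)} yield the intersection {'wght': (0.5, 0.7)} and the single-sided remainder {'wght': (0.7, 1.0)}; this is an instance of the "shrunk" case described next.)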
# # Remainder is shrunk if each of bot's axis ranges, except for exactly one, lies # within that of the intersection, and on that one axis bot extrudes out of the # intersection on only one side. # # Otherwise bot is returned in full as the remainder, as the true remainder is not # representable as a single box. remainder = dict(bot) extruding = False fullyInside = True for axisTag in top: if axisTag in bot: continue extruding = True fullyInside = False break for axisTag in bot: if axisTag not in top: continue # Axis range lies fully within min1, max1 = intersection[axisTag] min2, max2 = bot[axisTag] if min1 <= min2 and max2 <= max1: continue # Axis range lies fully within # Bot's range doesn't lie fully within top's for this axis. # We know they intersect, and it cannot lie fully outside either; so they # overlap. # If we have had an overlapping axis before, the remainder is not # representable as a box, so return full bottom and go home. if extruding: return intersection, bot extruding = True fullyInside = False # Otherwise, cut remainder on this axis and continue. if min1 <= min2: # Right side survives. minimum = max(max1, min2) maximum = max2 elif max2 <= max1: # Left side survives. minimum = min2 maximum = min(min1, max2) else: # Remainder leaks out from both sides. Can't cut either. return intersection, bot remainder[axisTag] = minimum, maximum if fullyInside: # bot is fully within intersection. Remainder is empty. return intersection, None return intersection, remainder def cleanupBox(box): """Return a sparse copy of `box`, without redundant (default) values. >>> cleanupBox({}) {} >>> cleanupBox({'wdth': (0.0, 1.0)}) {'wdth': (0.0, 1.0)} >>> cleanupBox({'wdth': (-1.0, 1.0)}) {} """ return {tag: limit for tag, limit in box.items() if limit != (-1.0, 1.0)} # # Low level implementation # def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag="rvrn"): """Low level implementation of addFeatureVariations that directly models the possibilities of the FeatureVariations table.""" featureTags = [featureTag] if isinstance(featureTag, str) else sorted(featureTag) processLast = "rvrn" not in featureTags or len(featureTags) > 1 # # if a <featureTag> feature is not present: # make empty <featureTag> feature # sort features, get <featureTag> feature index # add <featureTag> feature to all scripts # if a <featureTag> feature is present: # reuse <featureTag> feature index # make lookups # add feature variations # if table.Version < 0x00010001: table.Version = 0x00010001 # allow table.FeatureVariations varFeatureIndices = set() existingTags = { feature.FeatureTag for feature in table.FeatureList.FeatureRecord if feature.FeatureTag in featureTags } newTags = set(featureTags) - existingTags if newTags: varFeatures = [] for featureTag in sorted(newTags): varFeature = buildFeatureRecord(featureTag, []) table.FeatureList.FeatureRecord.append(varFeature) varFeatures.append(varFeature) table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord) sortFeatureList(table) for varFeature in varFeatures: varFeatureIndex = table.FeatureList.FeatureRecord.index(varFeature) for scriptRecord in table.ScriptList.ScriptRecord: if scriptRecord.Script.DefaultLangSys is None: raise VarLibError( "Feature variations require that the script " f"'{scriptRecord.ScriptTag}' defines a default language system."
) langSystems = [lsr.LangSys for lsr in scriptRecord.Script.LangSysRecord] for langSys in [scriptRecord.Script.DefaultLangSys] + langSystems: langSys.FeatureIndex.append(varFeatureIndex) langSys.FeatureCount = len(langSys.FeatureIndex) varFeatureIndices.add(varFeatureIndex) if existingTags: # indices may have changed if we inserted new features and sorted feature list # so we must do this after the above varFeatureIndices.update( index for index, feature in enumerate(table.FeatureList.FeatureRecord) if feature.FeatureTag in existingTags ) axisIndices = { axis.axisTag: axisIndex for axisIndex, axis in enumerate(font["fvar"].axes) } hasFeatureVariations = ( hasattr(table, "FeatureVariations") and table.FeatureVariations is not None ) featureVariationRecords = [] for conditionSet, lookupIndices in conditionalSubstitutions: conditionTable = [] for axisTag, (minValue, maxValue) in sorted(conditionSet.items()): if minValue > maxValue: raise VarLibValidationError( "A condition set has a minimum value above the maximum value." ) ct = buildConditionTable(axisIndices[axisTag], minValue, maxValue) conditionTable.append(ct) records = [] for varFeatureIndex in sorted(varFeatureIndices): existingLookupIndices = table.FeatureList.FeatureRecord[ varFeatureIndex ].Feature.LookupListIndex combinedLookupIndices = ( existingLookupIndices + lookupIndices if processLast else lookupIndices + existingLookupIndices ) records.append( buildFeatureTableSubstitutionRecord( varFeatureIndex, combinedLookupIndices ) ) if hasFeatureVariations and ( fvr := findFeatureVariationRecord(table.FeatureVariations, conditionTable) ): fvr.FeatureTableSubstitution.SubstitutionRecord.extend(records) fvr.FeatureTableSubstitution.SubstitutionCount = len( fvr.FeatureTableSubstitution.SubstitutionRecord ) else: featureVariationRecords.append( buildFeatureVariationRecord(conditionTable, records) ) if hasFeatureVariations: if table.FeatureVariations.Version != 0x00010000: raise VarLibError( "Unsupported FeatureVariations table version: " f"0x{table.FeatureVariations.Version:08x} (expected 0x00010000)." 
) table.FeatureVariations.FeatureVariationRecord.extend(featureVariationRecords) table.FeatureVariations.FeatureVariationCount = len( table.FeatureVariations.FeatureVariationRecord ) else: table.FeatureVariations = buildFeatureVariations(featureVariationRecords) # # Building GSUB/FeatureVariations internals # def buildGSUB(): """Build a GSUB table from scratch.""" fontTable = newTable("GSUB") gsub = fontTable.table = ot.GSUB() gsub.Version = 0x00010001 # allow gsub.FeatureVariations gsub.ScriptList = ot.ScriptList() gsub.ScriptList.ScriptRecord = [] gsub.FeatureList = ot.FeatureList() gsub.FeatureList.FeatureRecord = [] gsub.LookupList = ot.LookupList() gsub.LookupList.Lookup = [] srec = ot.ScriptRecord() srec.ScriptTag = "DFLT" srec.Script = ot.Script() srec.Script.DefaultLangSys = None srec.Script.LangSysRecord = [] srec.Script.LangSysCount = 0 langrec = ot.LangSysRecord() langrec.LangSys = ot.LangSys() langrec.LangSys.ReqFeatureIndex = 0xFFFF langrec.LangSys.FeatureIndex = [] srec.Script.DefaultLangSys = langrec.LangSys gsub.ScriptList.ScriptRecord.append(srec) gsub.ScriptList.ScriptCount = 1 gsub.FeatureVariations = None return fontTable def makeSubstitutionsHashable(conditionalSubstitutions): """Turn all the substitution dictionaries in sorted tuples of tuples so they are hashable, to detect duplicates so we don't write out redundant data.""" allSubstitutions = set() condSubst = [] for conditionSet, substitutionMaps in conditionalSubstitutions: substitutions = [] for substitutionMap in substitutionMaps: subst = tuple(sorted(substitutionMap.items())) substitutions.append(subst) allSubstitutions.add(subst) condSubst.append((conditionSet, substitutions)) return condSubst, sorted(allSubstitutions) class ShifterVisitor(TTVisitor): def __init__(self, shift): self.shift = shift @ShifterVisitor.register_attr(ot.Feature, "LookupListIndex") # GSUB/GPOS def visit(visitor, obj, attr, value): shift = visitor.shift value = [l + shift for l in value] setattr(obj, attr, value) @ShifterVisitor.register_attr( (ot.SubstLookupRecord, ot.PosLookupRecord), "LookupListIndex" ) def visit(visitor, obj, attr, value): setattr(obj, attr, visitor.shift + value) def buildSubstitutionLookups(gsub, allSubstitutions, processLast=False): """Build the lookups for the glyph substitutions, return a dict mapping the substitution to lookup indices.""" # Insert lookups at the beginning of the lookup vector # https://github.com/googlefonts/fontmake/issues/950 firstIndex = len(gsub.LookupList.Lookup) if processLast else 0 lookupMap = {} for i, substitutionMap in enumerate(allSubstitutions): lookupMap[substitutionMap] = firstIndex + i if not processLast: # Shift all lookup indices in gsub by len(allSubstitutions) shift = len(allSubstitutions) visitor = ShifterVisitor(shift) visitor.visit(gsub.FeatureList.FeatureRecord) visitor.visit(gsub.LookupList.Lookup) for i, subst in enumerate(allSubstitutions): substMap = dict(subst) lookup = buildLookup([buildSingleSubstSubtable(substMap)]) if processLast: gsub.LookupList.Lookup.append(lookup) else: gsub.LookupList.Lookup.insert(i, lookup) assert gsub.LookupList.Lookup[lookupMap[subst]] is lookup gsub.LookupList.LookupCount = len(gsub.LookupList.Lookup) return lookupMap def buildFeatureVariations(featureVariationRecords): """Build the FeatureVariations subtable.""" fv = ot.FeatureVariations() fv.Version = 0x00010000 fv.FeatureVariationRecord = featureVariationRecords fv.FeatureVariationCount = len(featureVariationRecords) return fv def buildFeatureRecord(featureTag, lookupListIndices): 
"""Build a FeatureRecord.""" fr = ot.FeatureRecord() fr.FeatureTag = featureTag fr.Feature = ot.Feature() fr.Feature.LookupListIndex = lookupListIndices fr.Feature.populateDefaults() return fr def buildFeatureVariationRecord(conditionTable, substitutionRecords): """Build a FeatureVariationRecord.""" fvr = ot.FeatureVariationRecord() fvr.ConditionSet = ot.ConditionSet() fvr.ConditionSet.ConditionTable = conditionTable fvr.ConditionSet.ConditionCount = len(conditionTable) fvr.FeatureTableSubstitution = ot.FeatureTableSubstitution() fvr.FeatureTableSubstitution.Version = 0x00010000 fvr.FeatureTableSubstitution.SubstitutionRecord = substitutionRecords fvr.FeatureTableSubstitution.SubstitutionCount = len(substitutionRecords) return fvr def buildFeatureTableSubstitutionRecord(featureIndex, lookupListIndices): """Build a FeatureTableSubstitutionRecord.""" ftsr = ot.FeatureTableSubstitutionRecord() ftsr.FeatureIndex = featureIndex ftsr.Feature = ot.Feature() ftsr.Feature.LookupListIndex = lookupListIndices ftsr.Feature.LookupCount = len(lookupListIndices) return ftsr def buildConditionTable(axisIndex, filterRangeMinValue, filterRangeMaxValue): """Build a ConditionTable.""" ct = ot.ConditionTable() ct.Format = 1 ct.AxisIndex = axisIndex ct.FilterRangeMinValue = filterRangeMinValue ct.FilterRangeMaxValue = filterRangeMaxValue return ct def findFeatureVariationRecord(featureVariations, conditionTable): """Find a FeatureVariationRecord that has the same conditionTable.""" if featureVariations.Version != 0x00010000: raise VarLibError( "Unsupported FeatureVariations table version: " f"0x{featureVariations.Version:08x} (expected 0x00010000)." ) for fvr in featureVariations.FeatureVariationRecord: if conditionTable == fvr.ConditionSet.ConditionTable: return fvr return None def sortFeatureList(table): """Sort the feature list by feature tag, and remap the feature indices elsewhere. This is needed after the feature list has been modified. 
""" # decorate, sort, undecorate, because we need to make an index remapping table tagIndexFea = [ (fea.FeatureTag, index, fea) for index, fea in enumerate(table.FeatureList.FeatureRecord) ] tagIndexFea.sort() table.FeatureList.FeatureRecord = [fea for tag, index, fea in tagIndexFea] featureRemap = dict( zip([index for tag, index, fea in tagIndexFea], range(len(tagIndexFea))) ) # Remap the feature indices remapFeatures(table, featureRemap) def remapFeatures(table, featureRemap): """Go through the scripts list, and remap feature indices.""" for scriptIndex, script in enumerate(table.ScriptList.ScriptRecord): defaultLangSys = script.Script.DefaultLangSys if defaultLangSys is not None: _remapLangSys(defaultLangSys, featureRemap) for langSysRecordIndex, langSysRec in enumerate(script.Script.LangSysRecord): langSys = langSysRec.LangSys _remapLangSys(langSys, featureRemap) if hasattr(table, "FeatureVariations") and table.FeatureVariations is not None: for fvr in table.FeatureVariations.FeatureVariationRecord: for ftsr in fvr.FeatureTableSubstitution.SubstitutionRecord: ftsr.FeatureIndex = featureRemap[ftsr.FeatureIndex] def _remapLangSys(langSys, featureRemap): if langSys.ReqFeatureIndex != 0xFFFF: langSys.ReqFeatureIndex = featureRemap[langSys.ReqFeatureIndex] langSys.FeatureIndex = [featureRemap[index] for index in langSys.FeatureIndex] if __name__ == "__main__": import doctest, sys sys.exit(doctest.testmod().failed) PKaZZZ��X���"fontTools/varLib/interpolatable.py""" Tool to find wrong contour order between different masters, and other interpolatability (or lack thereof) issues. Call as: $ fonttools varLib.interpolatable font1 font2 ... """ from .interpolatableHelpers import * from .interpolatableTestContourOrder import test_contour_order from .interpolatableTestStartingPoint import test_starting_point from fontTools.pens.recordingPen import ( RecordingPen, DecomposingRecordingPen, lerpRecordings, ) from fontTools.pens.transformPen import TransformPen from fontTools.pens.statisticsPen import StatisticsPen, StatisticsControlPen from fontTools.pens.momentsPen import OpenContourError from fontTools.varLib.models import piecewiseLinearMap, normalizeLocation from fontTools.misc.fixedTools import floatToFixedToStr from fontTools.misc.transform import Transform from collections import defaultdict from types import SimpleNamespace from functools import wraps from pprint import pformat from math import sqrt, atan2, pi import logging import os log = logging.getLogger("fontTools.varLib.interpolatable") DEFAULT_TOLERANCE = 0.95 DEFAULT_KINKINESS = 0.5 DEFAULT_KINKINESS_LENGTH = 0.002 # ratio of UPEM DEFAULT_UPEM = 1000 class Glyph: ITEMS = ( "recordings", "greenStats", "controlStats", "greenVectors", "controlVectors", "nodeTypes", "isomorphisms", "points", "openContours", ) def __init__(self, glyphname, glyphset): self.name = glyphname for item in self.ITEMS: setattr(self, item, []) self._populate(glyphset) def _fill_in(self, ix): for item in self.ITEMS: if len(getattr(self, item)) == ix: getattr(self, item).append(None) def _populate(self, glyphset): glyph = glyphset[self.name] self.doesnt_exist = glyph is None if self.doesnt_exist: return perContourPen = PerContourOrComponentPen(RecordingPen, glyphset=glyphset) try: glyph.draw(perContourPen, outputImpliedClosingLine=True) except TypeError: glyph.draw(perContourPen) self.recordings = perContourPen.value del perContourPen for ix, contour in enumerate(self.recordings): nodeTypes = [op for op, arg in contour.value] self.nodeTypes.append(nodeTypes) 
greenStats = StatisticsPen(glyphset=glyphset) controlStats = StatisticsControlPen(glyphset=glyphset) try: contour.replay(greenStats) contour.replay(controlStats) self.openContours.append(False) except OpenContourError: self.openContours.append(True) self._fill_in(ix) continue self.greenStats.append(greenStats) self.controlStats.append(controlStats) self.greenVectors.append(contour_vector_from_stats(greenStats)) self.controlVectors.append(contour_vector_from_stats(controlStats)) # Check starting point if nodeTypes[0] == "addComponent": self._fill_in(ix) continue assert nodeTypes[0] == "moveTo" assert nodeTypes[-1] in ("closePath", "endPath") points = SimpleRecordingPointPen() converter = SegmentToPointPen(points, False) contour.replay(converter) # points.value is a list of (pt, bool) pairs, where bool is True if on-curve and False if off-curve; # now check all rotations and mirror-rotations of the contour and build a list of isomorphic # possible starting points. self.points.append(points.value) isomorphisms = [] self.isomorphisms.append(isomorphisms) # Add rotations add_isomorphisms(points.value, isomorphisms, False) # Add mirrored rotations add_isomorphisms(points.value, isomorphisms, True) def draw(self, pen, contour_idx=None): if contour_idx is None: for contour in self.recordings: contour.draw(pen) else: self.recordings[contour_idx].draw(pen) def test_gen( glyphsets, glyphs=None, names=None, ignore_missing=False, *, locations=None, tolerance=DEFAULT_TOLERANCE, kinkiness=DEFAULT_KINKINESS, upem=DEFAULT_UPEM, show_all=False, ): if tolerance >= 10: tolerance *= 0.01 assert 0 <= tolerance <= 1 if kinkiness >= 10: kinkiness *= 0.01 assert 0 <= kinkiness names = names or [repr(g) for g in glyphsets] if glyphs is None: # `glyphs = glyphsets[0].keys()` is faster, certainly, but doesn't allow for sparse TTFs/OTFs given out of order # ...
risks the sparse master being the first one, and only processing a subset of the glyphs glyphs = {g for glyphset in glyphsets for g in glyphset.keys()} parents, order = find_parents_and_order(glyphsets, locations) def grand_parent(i, glyphname): if i is None: return None i = parents[i] if i is None: return None while parents[i] is not None and glyphsets[i][glyphname] is None: i = parents[i] return i for glyph_name in glyphs: log.info("Testing glyph %s", glyph_name) allGlyphs = [Glyph(glyph_name, glyphset) for glyphset in glyphsets] if len([1 for glyph in allGlyphs if glyph is not None]) <= 1: continue for master_idx, (glyph, glyphset, name) in enumerate( zip(allGlyphs, glyphsets, names) ): if glyph.doesnt_exist: if not ignore_missing: yield ( glyph_name, { "type": InterpolatableProblem.MISSING, "master": name, "master_idx": master_idx, }, ) continue has_open = False for ix, open in enumerate(glyph.openContours): if not open: continue has_open = True yield ( glyph_name, { "type": InterpolatableProblem.OPEN_PATH, "master": name, "master_idx": master_idx, "contour": ix, }, ) if has_open: continue matchings = [None] * len(glyphsets) for m1idx in order: glyph1 = allGlyphs[m1idx] if glyph1 is None or not glyph1.nodeTypes: continue m0idx = grand_parent(m1idx, glyph_name) if m0idx is None: continue glyph0 = allGlyphs[m0idx] if glyph0 is None or not glyph0.nodeTypes: continue # # Basic compatibility checks # m1 = glyph0.nodeTypes m0 = glyph1.nodeTypes if len(m0) != len(m1): yield ( glyph_name, { "type": InterpolatableProblem.PATH_COUNT, "master_1": names[m0idx], "master_2": names[m1idx], "master_1_idx": m0idx, "master_2_idx": m1idx, "value_1": len(m0), "value_2": len(m1), }, ) continue if m0 != m1: for pathIx, (nodes1, nodes2) in enumerate(zip(m0, m1)): if nodes1 == nodes2: continue if len(nodes1) != len(nodes2): yield ( glyph_name, { "type": InterpolatableProblem.NODE_COUNT, "path": pathIx, "master_1": names[m0idx], "master_2": names[m1idx], "master_1_idx": m0idx, "master_2_idx": m1idx, "value_1": len(nodes1), "value_2": len(nodes2), }, ) continue for nodeIx, (n1, n2) in enumerate(zip(nodes1, nodes2)): if n1 != n2: yield ( glyph_name, { "type": InterpolatableProblem.NODE_INCOMPATIBILITY, "path": pathIx, "node": nodeIx, "master_1": names[m0idx], "master_2": names[m1idx], "master_1_idx": m0idx, "master_2_idx": m1idx, "value_1": n1, "value_2": n2, }, ) continue # # InterpolatableProblem.CONTOUR_ORDER check # this_tolerance, matching = test_contour_order(glyph0, glyph1) if this_tolerance < tolerance: yield ( glyph_name, { "type": InterpolatableProblem.CONTOUR_ORDER, "master_1": names[m0idx], "master_2": names[m1idx], "master_1_idx": m0idx, "master_2_idx": m1idx, "value_1": list(range(len(matching))), "value_2": matching, "tolerance": this_tolerance, }, ) matchings[m1idx] = matching # # wrong-start-point / weight check # m0Isomorphisms = glyph0.isomorphisms m1Isomorphisms = glyph1.isomorphisms m0Vectors = glyph0.greenVectors m1Vectors = glyph1.greenVectors recording0 = glyph0.recordings recording1 = glyph1.recordings # If contour-order is wrong, adjust it matching = matchings[m1idx] if ( matching is not None and m1Isomorphisms ): # m1 is empty for composite glyphs m1Isomorphisms = [m1Isomorphisms[i] for i in matching] m1Vectors = [m1Vectors[i] for i in matching] recording1 = [recording1[i] for i in matching] midRecording = [] for c0, c1 in zip(recording0, recording1): try: r = RecordingPen() r.value = list(lerpRecordings(c0.value, c1.value)) midRecording.append(r) except ValueError: # Mismatch because 
of the reordering above midRecording.append(None) for ix, (contour0, contour1) in enumerate( zip(m0Isomorphisms, m1Isomorphisms) ): if ( contour0 is None or contour1 is None or len(contour0) == 0 or len(contour0) != len(contour1) ): # We already reported this; or nothing to do; or not compatible # after reordering above. continue this_tolerance, proposed_point, reverse = test_starting_point( glyph0, glyph1, ix, tolerance, matching ) if this_tolerance < tolerance: yield ( glyph_name, { "type": InterpolatableProblem.WRONG_START_POINT, "contour": ix, "master_1": names[m0idx], "master_2": names[m1idx], "master_1_idx": m0idx, "master_2_idx": m1idx, "value_1": 0, "value_2": proposed_point, "reversed": reverse, "tolerance": this_tolerance, }, ) # Weight check. # # If contour could be mid-interpolated, and the two # contours have the same area sign, proceed. # # The sign difference can happen if it's a weirdo # self-intersecting contour; ignore it. contour = midRecording[ix] if contour and (m0Vectors[ix][0] < 0) == (m1Vectors[ix][0] < 0): midStats = StatisticsPen(glyphset=None) contour.replay(midStats) midVector = contour_vector_from_stats(midStats) m0Vec = m0Vectors[ix] m1Vec = m1Vectors[ix] size0 = m0Vec[0] * m0Vec[0] size1 = m1Vec[0] * m1Vec[0] midSize = midVector[0] * midVector[0] for overweight, problem_type in enumerate( ( InterpolatableProblem.UNDERWEIGHT, InterpolatableProblem.OVERWEIGHT, ) ): if overweight: expectedSize = max(size0, size1) continue else: expectedSize = sqrt(size0 * size1) log.debug( "%s: actual size %g; threshold size %g, master sizes: %g, %g", problem_type, midSize, expectedSize, size0, size1, ) if ( not overweight and expectedSize * tolerance > midSize + 1e-5 ) or (overweight and 1e-5 + expectedSize / tolerance < midSize): try: if overweight: this_tolerance = expectedSize / midSize else: this_tolerance = midSize / expectedSize except ZeroDivisionError: this_tolerance = 0 log.debug("tolerance %g", this_tolerance) yield ( glyph_name, { "type": problem_type, "contour": ix, "master_1": names[m0idx], "master_2": names[m1idx], "master_1_idx": m0idx, "master_2_idx": m1idx, "tolerance": this_tolerance, }, ) # # "kink" detector # m0 = glyph0.points m1 = glyph1.points # If contour-order is wrong, adjust it if matchings[m1idx] is not None and m1: # m1 is empty for composite glyphs m1 = [m1[i] for i in matchings[m1idx]] t = 0.1 # ~sin(radians(6)) for tolerance 0.95 deviation_threshold = ( upem * DEFAULT_KINKINESS_LENGTH * DEFAULT_KINKINESS / kinkiness ) for ix, (contour0, contour1) in enumerate(zip(m0, m1)): if ( contour0 is None or contour1 is None or len(contour0) == 0 or len(contour0) != len(contour1) ): # We already reported this; or nothing to do; or not compatible # after reordering above. continue # Walk the contour, keeping track of three consecutive points, with # middle one being an on-curve. If the three are co-linear then # check for kinky-ness. for i in range(len(contour0)): pt0 = contour0[i] pt1 = contour1[i] if not pt0[1] or not pt1[1]: # Skip off-curves continue pt0_prev = contour0[i - 1] pt1_prev = contour1[i - 1] pt0_next = contour0[(i + 1) % len(contour0)] pt1_next = contour1[(i + 1) % len(contour1)] if pt0_prev[1] and pt1_prev[1]: # At least one off-curve is required continue if pt0_next[1] and pt1_next[1]: # At least one off-curve is required continue pt0 = complex(*pt0[0]) pt1 = complex(*pt1[0]) pt0_prev = complex(*pt0_prev[0]) pt1_prev = complex(*pt1_prev[0]) pt0_next = complex(*pt0_next[0]) pt1_next = complex(*pt1_next[0]) # We have three consecutive points.
Check whether # they are colinear. d0_prev = pt0 - pt0_prev d0_next = pt0_next - pt0 d1_prev = pt1 - pt1_prev d1_next = pt1_next - pt1 sin0 = d0_prev.real * d0_next.imag - d0_prev.imag * d0_next.real sin1 = d1_prev.real * d1_next.imag - d1_prev.imag * d1_next.real try: sin0 /= abs(d0_prev) * abs(d0_next) sin1 /= abs(d1_prev) * abs(d1_next) except ZeroDivisionError: continue if abs(sin0) > t or abs(sin1) > t: # Not colinear / not smooth. continue # Check the mid-point is actually, well, in the middle. dot0 = d0_prev.real * d0_next.real + d0_prev.imag * d0_next.imag dot1 = d1_prev.real * d1_next.real + d1_prev.imag * d1_next.imag if dot0 < 0 or dot1 < 0: # Sharp corner. continue # Fine, if handle ratios are similar... r0 = abs(d0_prev) / (abs(d0_prev) + abs(d0_next)) r1 = abs(d1_prev) / (abs(d1_prev) + abs(d1_next)) r_diff = abs(r0 - r1) if abs(r_diff) < t: # Smooth enough. continue mid = (pt0 + pt1) / 2 mid_prev = (pt0_prev + pt1_prev) / 2 mid_next = (pt0_next + pt1_next) / 2 mid_d0 = mid - mid_prev mid_d1 = mid_next - mid sin_mid = mid_d0.real * mid_d1.imag - mid_d0.imag * mid_d1.real try: sin_mid /= abs(mid_d0) * abs(mid_d1) except ZeroDivisionError: continue # ...or if the angles are similar. if abs(sin_mid) * (tolerance * kinkiness) <= t: # Smooth enough. continue # How visible is the kink? cross = sin_mid * abs(mid_d0) * abs(mid_d1) arc_len = abs(mid_d0 + mid_d1) deviation = abs(cross / arc_len) if deviation < deviation_threshold: continue deviation_ratio = deviation / arc_len if deviation_ratio > t: continue this_tolerance = t / (abs(sin_mid) * kinkiness) log.debug( "kink: deviation %g; deviation_ratio %g; sin_mid %g; r_diff %g", deviation, deviation_ratio, sin_mid, r_diff, ) log.debug("tolerance %g", this_tolerance) yield ( glyph_name, { "type": InterpolatableProblem.KINK, "contour": ix, "master_1": names[m0idx], "master_2": names[m1idx], "master_1_idx": m0idx, "master_2_idx": m1idx, "value": i, "tolerance": this_tolerance, }, ) # # --show-all # if show_all: yield ( glyph_name, { "type": InterpolatableProblem.NOTHING, "master_1": names[m0idx], "master_2": names[m1idx], "master_1_idx": m0idx, "master_2_idx": m1idx, }, ) @wraps(test_gen) def test(*args, **kwargs): problems = defaultdict(list) for glyphname, problem in test_gen(*args, **kwargs): problems[glyphname].append(problem) return problems def recursivelyAddGlyph(glyphname, glyphset, ttGlyphSet, glyf): if glyphname in glyphset: return glyphset[glyphname] = ttGlyphSet[glyphname] for component in getattr(glyf[glyphname], "components", []): recursivelyAddGlyph(component.glyphName, glyphset, ttGlyphSet, glyf) def ensure_parent_dir(path): dirname = os.path.dirname(path) if dirname: os.makedirs(dirname, exist_ok=True) return path def main(args=None): """Test for interpolatability issues between fonts""" import argparse import sys parser = argparse.ArgumentParser( "fonttools varLib.interpolatable", description=main.__doc__, ) parser.add_argument( "--glyphs", action="store", help="Space-separate name of glyphs to check", ) parser.add_argument( "--show-all", action="store_true", help="Show all glyph pairs, even if no problems are found", ) parser.add_argument( "--tolerance", action="store", type=float, help="Error tolerance. Between 0 and 1. Default %s" % DEFAULT_TOLERANCE, ) parser.add_argument( "--kinkiness", action="store", type=float, help="How aggressively report kinks. 
Default %s" % DEFAULT_KINKINESS, ) parser.add_argument( "--json", action="store_true", help="Output report in JSON format", ) parser.add_argument( "--pdf", action="store", help="Output report in PDF format", ) parser.add_argument( "--ps", action="store", help="Output report in PostScript format", ) parser.add_argument( "--html", action="store", help="Output report in HTML format", ) parser.add_argument( "--quiet", action="store_true", help="Only exit with code 1 or 0, no output", ) parser.add_argument( "--output", action="store", help="Output file for the problem report; Default: stdout", ) parser.add_argument( "--ignore-missing", action="store_true", help="Will not report glyphs missing from sparse masters as errors", ) parser.add_argument( "inputs", metavar="FILE", type=str, nargs="+", help="Input a single variable font / DesignSpace / Glyphs file, or multiple TTF/UFO files", ) parser.add_argument( "--name", metavar="NAME", type=str, action="append", help="Name of the master to use in the report. If not provided, all are used.", ) parser.add_argument("-v", "--verbose", action="store_true", help="Run verbosely.") parser.add_argument("--debug", action="store_true", help="Run with debug output.") args = parser.parse_args(args) from fontTools import configLogger configLogger(level=("INFO" if args.verbose else "ERROR")) if args.debug: configLogger(level="DEBUG") glyphs = args.glyphs.split() if args.glyphs else None from os.path import basename fonts = [] names = [] locations = [] upem = DEFAULT_UPEM original_args_inputs = tuple(args.inputs) if len(args.inputs) == 1: designspace = None if args.inputs[0].endswith(".designspace"): from fontTools.designspaceLib import DesignSpaceDocument designspace = DesignSpaceDocument.fromfile(args.inputs[0]) args.inputs = [master.path for master in designspace.sources] locations = [master.location for master in designspace.sources] axis_triples = { a.name: (a.minimum, a.default, a.maximum) for a in designspace.axes } axis_mappings = {a.name: a.map for a in designspace.axes} axis_triples = { k: tuple(piecewiseLinearMap(v, dict(axis_mappings[k])) for v in vv) for k, vv in axis_triples.items() } elif args.inputs[0].endswith((".glyphs", ".glyphspackage")): from glyphsLib import GSFont, to_designspace gsfont = GSFont(args.inputs[0]) upem = gsfont.upm designspace = to_designspace(gsfont) fonts = [source.font for source in designspace.sources] names = ["%s-%s" % (f.info.familyName, f.info.styleName) for f in fonts] args.inputs = [] locations = [master.location for master in designspace.sources] axis_triples = { a.name: (a.minimum, a.default, a.maximum) for a in designspace.axes } axis_mappings = {a.name: a.map for a in designspace.axes} axis_triples = { k: tuple(piecewiseLinearMap(v, dict(axis_mappings[k])) for v in vv) for k, vv in axis_triples.items() } elif args.inputs[0].endswith(".ttf"): from fontTools.ttLib import TTFont font = TTFont(args.inputs[0]) upem = font["head"].unitsPerEm if "gvar" in font: # Is variable font axisMapping = {} fvar = font["fvar"] for axis in fvar.axes: axisMapping[axis.axisTag] = { -1: axis.minValue, 0: axis.defaultValue, 1: axis.maxValue, } if "avar" in font: avar = font["avar"] for axisTag, segments in avar.segments.items(): fvarMapping = axisMapping[axisTag].copy() for location, value in segments.items(): axisMapping[axisTag][value] = piecewiseLinearMap( location, fvarMapping ) gvar = font["gvar"] glyf = font["glyf"] # Gather all glyphs at their "master" locations ttGlyphSets = {} glyphsets = defaultdict(dict) if glyphs is None: 
glyphs = sorted(gvar.variations.keys()) for glyphname in glyphs: for var in gvar.variations[glyphname]: locDict = {} loc = [] for tag, val in sorted(var.axes.items()): locDict[tag] = val[1] loc.append((tag, val[1])) locTuple = tuple(loc) if locTuple not in ttGlyphSets: ttGlyphSets[locTuple] = font.getGlyphSet( location=locDict, normalized=True, recalcBounds=False ) recursivelyAddGlyph( glyphname, glyphsets[locTuple], ttGlyphSets[locTuple], glyf ) names = ["''"] fonts = [font.getGlyphSet()] locations = [{}] axis_triples = {a: (-1, 0, +1) for a in sorted(axisMapping.keys())} for locTuple in sorted(glyphsets.keys(), key=lambda v: (len(v), v)): name = ( "'" + " ".join( "%s=%s" % ( k, floatToFixedToStr( piecewiseLinearMap(v, axisMapping[k]), 14 ), ) for k, v in locTuple ) + "'" ) names.append(name) fonts.append(glyphsets[locTuple]) locations.append(dict(locTuple)) args.ignore_missing = True args.inputs = [] if not locations: locations = [{} for _ in fonts] for filename in args.inputs: if filename.endswith(".ufo"): from fontTools.ufoLib import UFOReader font = UFOReader(filename) info = SimpleNamespace() font.readInfo(info) upem = info.unitsPerEm fonts.append(font) else: from fontTools.ttLib import TTFont font = TTFont(filename) upem = font["head"].unitsPerEm fonts.append(font) names.append(basename(filename).rsplit(".", 1)[0]) glyphsets = [] for font in fonts: if hasattr(font, "getGlyphSet"): glyphset = font.getGlyphSet() else: glyphset = font glyphsets.append({k: glyphset[k] for k in glyphset.keys()}) if args.name: accepted_names = set(args.name) glyphsets = [ glyphset for name, glyphset in zip(names, glyphsets) if name in accepted_names ] locations = [ location for name, location in zip(names, locations) if name in accepted_names ] names = [name for name in names if name in accepted_names] if not glyphs: glyphs = sorted(set([gn for glyphset in glyphsets for gn in glyphset.keys()])) glyphsSet = set(glyphs) for glyphset in glyphsets: glyphSetGlyphNames = set(glyphset.keys()) diff = glyphsSet - glyphSetGlyphNames if diff: for gn in diff: glyphset[gn] = None # Normalize locations locations = [normalizeLocation(loc, axis_triples) for loc in locations] tolerance = args.tolerance or DEFAULT_TOLERANCE kinkiness = args.kinkiness if args.kinkiness is not None else DEFAULT_KINKINESS try: log.info("Running on %d glyphsets", len(glyphsets)) log.info("Locations: %s", pformat(locations)) problems_gen = test_gen( glyphsets, glyphs=glyphs, names=names, locations=locations, upem=upem, ignore_missing=args.ignore_missing, tolerance=tolerance, kinkiness=kinkiness, show_all=args.show_all, ) problems = defaultdict(list) f = ( sys.stdout if args.output is None else open(ensure_parent_dir(args.output), "w") ) if not args.quiet: if args.json: import json for glyphname, problem in problems_gen: problems[glyphname].append(problem) print(json.dumps(problems), file=f) else: last_glyphname = None for glyphname, p in problems_gen: problems[glyphname].append(p) if glyphname != last_glyphname: print(f"Glyph {glyphname} was not compatible:", file=f) last_glyphname = glyphname last_master_idxs = None master_idxs = ( (p["master_idx"]) if "master_idx" in p else (p["master_1_idx"], p["master_2_idx"]) ) if master_idxs != last_master_idxs: master_names = ( (p["master"]) if "master" in p else (p["master_1"], p["master_2"]) ) print(f" Masters: %s:" % ", ".join(master_names), file=f) last_master_idxs = master_idxs if p["type"] == InterpolatableProblem.MISSING: print( " Glyph was missing in master %s" % p["master"], file=f ) elif 
p["type"] == InterpolatableProblem.OPEN_PATH: print( " Glyph has an open path in master %s" % p["master"], file=f, ) elif p["type"] == InterpolatableProblem.PATH_COUNT: print( " Path count differs: %i in %s, %i in %s" % ( p["value_1"], p["master_1"], p["value_2"], p["master_2"], ), file=f, ) elif p["type"] == InterpolatableProblem.NODE_COUNT: print( " Node count differs in path %i: %i in %s, %i in %s" % ( p["path"], p["value_1"], p["master_1"], p["value_2"], p["master_2"], ), file=f, ) elif p["type"] == InterpolatableProblem.NODE_INCOMPATIBILITY: print( " Node %o incompatible in path %i: %s in %s, %s in %s" % ( p["node"], p["path"], p["value_1"], p["master_1"], p["value_2"], p["master_2"], ), file=f, ) elif p["type"] == InterpolatableProblem.CONTOUR_ORDER: print( " Contour order differs: %s in %s, %s in %s" % ( p["value_1"], p["master_1"], p["value_2"], p["master_2"], ), file=f, ) elif p["type"] == InterpolatableProblem.WRONG_START_POINT: print( " Contour %d start point differs: %s in %s, %s in %s; reversed: %s" % ( p["contour"], p["value_1"], p["master_1"], p["value_2"], p["master_2"], p["reversed"], ), file=f, ) elif p["type"] == InterpolatableProblem.UNDERWEIGHT: print( " Contour %d interpolation is underweight: %s, %s" % ( p["contour"], p["master_1"], p["master_2"], ), file=f, ) elif p["type"] == InterpolatableProblem.OVERWEIGHT: print( " Contour %d interpolation is overweight: %s, %s" % ( p["contour"], p["master_1"], p["master_2"], ), file=f, ) elif p["type"] == InterpolatableProblem.KINK: print( " Contour %d has a kink at %s: %s, %s" % ( p["contour"], p["value"], p["master_1"], p["master_2"], ), file=f, ) elif p["type"] == InterpolatableProblem.NOTHING: print( " Showing %s and %s" % ( p["master_1"], p["master_2"], ), file=f, ) else: for glyphname, problem in problems_gen: problems[glyphname].append(problem) problems = sort_problems(problems) for p in "ps", "pdf": arg = getattr(args, p) if arg is None: continue log.info("Writing %s to %s", p.upper(), arg) from .interpolatablePlot import InterpolatablePS, InterpolatablePDF PlotterClass = InterpolatablePS if p == "ps" else InterpolatablePDF with PlotterClass( ensure_parent_dir(arg), glyphsets=glyphsets, names=names ) as doc: doc.add_title_page( original_args_inputs, tolerance=tolerance, kinkiness=kinkiness ) if problems: doc.add_summary(problems) doc.add_problems(problems) if not problems and not args.quiet: doc.draw_cupcake() if problems: doc.add_index() doc.add_table_of_contents() if args.html: log.info("Writing HTML to %s", args.html) from .interpolatablePlot import InterpolatableSVG svgs = [] glyph_starts = {} with InterpolatableSVG(svgs, glyphsets=glyphsets, names=names) as svg: svg.add_title_page( original_args_inputs, show_tolerance=False, tolerance=tolerance, kinkiness=kinkiness, ) for glyph, glyph_problems in problems.items(): glyph_starts[len(svgs)] = glyph svg.add_problems( {glyph: glyph_problems}, show_tolerance=False, show_page_number=False, ) if not problems and not args.quiet: svg.draw_cupcake() import base64 with open(ensure_parent_dir(args.html), "wb") as f: f.write(b"<!DOCTYPE html>\n") f.write( b'<html><body align="center" style="font-family: sans-serif; text-color: #222">\n' ) f.write(b"<title>fonttools varLib.interpolatable report</title>\n") for i, svg in enumerate(svgs): if i in glyph_starts: f.write(f"<h1>Glyph {glyph_starts[i]}</h1>\n".encode("utf-8")) f.write("<img src='data:image/svg+xml;base64,".encode("utf-8")) f.write(base64.b64encode(svg)) f.write(b"' />\n") f.write(b"<hr>\n") f.write(b"</body></html>\n") 
except Exception as e: e.args += original_args_inputs log.error(e) raise if problems: return problems if __name__ == "__main__": import sys problems = main() sys.exit(int(bool(problems))) fontTools/varLib/interpolatableHelpers.py from fontTools.ttLib.ttGlyphSet import LerpGlyphSet from fontTools.pens.basePen import AbstractPen, BasePen, DecomposingPen from fontTools.pens.pointPen import AbstractPointPen, SegmentToPointPen from fontTools.pens.recordingPen import RecordingPen, DecomposingRecordingPen from fontTools.misc.transform import Transform from collections import defaultdict, deque from math import sqrt, copysign, atan2, pi from enum import Enum import itertools import logging log = logging.getLogger("fontTools.varLib.interpolatable") class InterpolatableProblem: NOTHING = "nothing" MISSING = "missing" OPEN_PATH = "open_path" PATH_COUNT = "path_count" NODE_COUNT = "node_count" NODE_INCOMPATIBILITY = "node_incompatibility" CONTOUR_ORDER = "contour_order" WRONG_START_POINT = "wrong_start_point" KINK = "kink" UNDERWEIGHT = "underweight" OVERWEIGHT = "overweight" severity = { MISSING: 1, OPEN_PATH: 2, PATH_COUNT: 3, NODE_COUNT: 4, NODE_INCOMPATIBILITY: 5, CONTOUR_ORDER: 6, WRONG_START_POINT: 7, KINK: 8, UNDERWEIGHT: 9, OVERWEIGHT: 10, NOTHING: 11, } def sort_problems(problems): """Sort problems by severity, then by glyph name, then by problem message.""" return dict( sorted( problems.items(), key=lambda _: -min( ( (InterpolatableProblem.severity[p["type"]] + p.get("tolerance", 0)) for p in _[1] ), ), reverse=True, ) ) def rot_list(l, k): """Rotate list by k items forward. I.e. item at position 0 will be at position k in the returned list. Negative k is allowed.""" return l[-k:] + l[:-k] class PerContourPen(BasePen): def __init__(self, Pen, glyphset=None): BasePen.__init__(self, glyphset) self._glyphset = glyphset self._Pen = Pen self._pen = None self.value = [] def _moveTo(self, p0): self._newItem() self._pen.moveTo(p0) def _lineTo(self, p1): self._pen.lineTo(p1) def _qCurveToOne(self, p1, p2): self._pen.qCurveTo(p1, p2) def _curveToOne(self, p1, p2, p3): self._pen.curveTo(p1, p2, p3) def _closePath(self): self._pen.closePath() self._pen = None def _endPath(self): self._pen.endPath() self._pen = None def _newItem(self): self._pen = pen = self._Pen() self.value.append(pen) class PerContourOrComponentPen(PerContourPen): def addComponent(self, glyphName, transformation): self._newItem() self.value[-1].addComponent(glyphName, transformation) class SimpleRecordingPointPen(AbstractPointPen): def __init__(self): self.value = [] def beginPath(self, identifier=None, **kwargs): pass def endPath(self) -> None: pass def addPoint(self, pt, segmentType=None): self.value.append((pt, segmentType is not None)) def vdiff_hypot2(v0, v1): s = 0 for x0, x1 in zip(v0, v1): d = x1 - x0 s += d * d return s def vdiff_hypot2_complex(v0, v1): s = 0 for x0, x1 in zip(v0, v1): d = x1 - x0 s += d.real * d.real + d.imag * d.imag # This does the same but seems to be slower: # s += (d * d.conjugate()).real return s def matching_cost(G, matching): return sum(G[i][j] for i, j in enumerate(matching)) def min_cost_perfect_bipartite_matching_scipy(G): n = len(G) rows, cols = linear_sum_assignment(G) assert (rows == list(range(n))).all() return list(cols), matching_cost(G, cols) def min_cost_perfect_bipartite_matching_munkres(G): n = len(G) cols = [None] * n for row, col in Munkres().compute(G): cols[row] = col return cols, matching_cost(G, cols)
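# (All three backends solve the same assignment problem on the cost matrix G: scipy's linear_sum_assignment and Munkres run in polynomial (roughly cubic) time, while the brute-force fallback below enumerates all n! permutations, hence its n > 6 guard.)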
def min_cost_perfect_bipartite_matching_bruteforce(G): n = len(G) if n > 6: raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'") # Otherwise just brute-force permutations = itertools.permutations(range(n)) best = list(next(permutations)) best_cost = matching_cost(G, best) for p in permutations: cost = matching_cost(G, p) if cost < best_cost: best, best_cost = list(p), cost return best, best_cost try: from scipy.optimize import linear_sum_assignment min_cost_perfect_bipartite_matching = min_cost_perfect_bipartite_matching_scipy except ImportError: try: from munkres import Munkres min_cost_perfect_bipartite_matching = ( min_cost_perfect_bipartite_matching_munkres ) except ImportError: min_cost_perfect_bipartite_matching = ( min_cost_perfect_bipartite_matching_bruteforce ) def contour_vector_from_stats(stats): # Don't change the order of items here. # It's okay to add to the end, but otherwise other # code depends on this order. Search for "covariance". size = sqrt(abs(stats.area)) return ( copysign(size, stats.area), stats.meanX, stats.meanY, stats.stddevX * 2, stats.stddevY * 2, stats.correlation * size, ) def matching_for_vectors(m0, m1): n = len(m0) identity_matching = list(range(n)) costs = [[vdiff_hypot2(v0, v1) for v1 in m1] for v0 in m0] ( matching, matching_cost, ) = min_cost_perfect_bipartite_matching(costs) identity_cost = sum(costs[i][i] for i in range(n)) return matching, matching_cost, identity_cost def points_characteristic_bits(points): bits = 0 for pt, b in reversed(points): bits = (bits << 1) | b return bits _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR = 4 def points_complex_vector(points): vector = [] if not points: return vector points = [complex(*pt) for pt, _ in points] n = len(points) assert _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR == 4 points.extend(points[: _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR - 1]) while len(points) < _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR: points.extend(points[: _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR - 1]) for i in range(n): # The weights are magic numbers. # The point itself p0 = points[i] vector.append(p0) # The vector to the next point p1 = points[i + 1] d0 = p1 - p0 vector.append(d0 * 3) # The turn vector p2 = points[i + 2] d1 = p2 - p1 vector.append(d1 - d0) # The angle to the next point, as a cross product; # square-rooted, to match the dimensionality of distance.
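# (The cross product d0.real*d1.imag - d0.imag*d1.real equals |d0|*|d1|*sin(angle), so it scales quadratically with segment length; the signed square root brings it back to the linear scale of the other vector entries.)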
cross = d0.real * d1.imag - d0.imag * d1.real cross = copysign(sqrt(abs(cross)), cross) vector.append(cross * 4) return vector def add_isomorphisms(points, isomorphisms, reverse): reference_bits = points_characteristic_bits(points) n = len(points) # if points[0][0] == points[-1][0]: # abort if reverse: points = points[::-1] bits = points_characteristic_bits(points) else: bits = reference_bits vector = points_complex_vector(points) assert len(vector) % n == 0 mult = len(vector) // n mask = (1 << n) - 1 for i in range(n): b = ((bits << (n - i)) & mask) | (bits >> i) if b == reference_bits: isomorphisms.append( (rot_list(vector, -i * mult), n - 1 - i if reverse else i, reverse) ) def find_parents_and_order(glyphsets, locations): parents = [None] + list(range(len(glyphsets) - 1)) order = list(range(len(glyphsets))) if locations: # Order base master first base = next( (i for i, l in enumerate(locations) if all(v == 0 for v in l.values())), None, ) if base is not None: log.info("Base master index %s, location %s", base, locations[base]) else: base = 0 log.warning("No base master location found") # Form a minimum spanning tree of the locations try: from scipy.sparse.csgraph import minimum_spanning_tree graph = [[0] * len(locations) for _ in range(len(locations))] axes = set() for l in locations: axes.update(l.keys()) axes = sorted(axes) vectors = [tuple(l.get(k, 0) for k in axes) for l in locations] for i, j in itertools.combinations(range(len(locations)), 2): graph[i][j] = vdiff_hypot2(vectors[i], vectors[j]) tree = minimum_spanning_tree(graph) rows, cols = tree.nonzero() graph = defaultdict(set) for row, col in zip(rows, cols): graph[row].add(col) graph[col].add(row) # Traverse graph from the base and assign parents parents = [None] * len(locations) order = [] visited = set() queue = deque([base]) while queue: i = queue.popleft() visited.add(i) order.append(i) for j in sorted(graph[i]): if j not in visited: parents[j] = i queue.append(j) except ImportError: pass log.info("Parents: %s", parents) log.info("Order: %s", order) return parents, order def transform_from_stats(stats, inverse=False): # https://cookierobotics.com/007/ a = stats.varianceX b = stats.covariance c = stats.varianceY delta = (((a - c) * 0.5) ** 2 + b * b) ** 0.5 lambda1 = (a + c) * 0.5 + delta # Major eigenvalue lambda2 = (a + c) * 0.5 - delta # Minor eigenvalue theta = atan2(lambda1 - a, b) if b != 0 else (pi * 0.5 if a < c else 0)
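# (lambda1, lambda2 and theta are the eigenvalues and rotation angle of the 2x2 covariance matrix [[varianceX, covariance], [covariance, varianceY]]; the transform built below maps the unit circle onto the contour's dispersion ellipse, or normalizes it back when inverse=True.)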
trans = Transform() if lambda2 < 0: # XXX This is a hack. # The problem is that the covariance matrix is singular. # This happens when the contour is a line, or a circle. # In that case, the covariance matrix is not a good # representation of the contour. # We should probably detect this earlier and avoid # computing the covariance matrix in the first place. # But for now, we just avoid the division by zero. lambda2 = 0 if inverse: trans = trans.translate(-stats.meanX, -stats.meanY) trans = trans.rotate(-theta) trans = trans.scale(1 / sqrt(lambda1), 1 / sqrt(lambda2)) else: trans = trans.scale(sqrt(lambda1), sqrt(lambda2)) trans = trans.rotate(theta) trans = trans.translate(stats.meanX, stats.meanY) return trans fontTools/varLib/interpolatablePlot.py from .interpolatableHelpers import * from fontTools.ttLib import TTFont from fontTools.ttLib.ttGlyphSet import LerpGlyphSet from fontTools.pens.recordingPen import ( RecordingPen, DecomposingRecordingPen, RecordingPointPen, ) from fontTools.pens.boundsPen import ControlBoundsPen from fontTools.pens.cairoPen import CairoPen from fontTools.pens.pointPen import ( SegmentToPointPen, PointToSegmentPen, ReverseContourPointPen, ) from fontTools.varLib.interpolatableHelpers import ( PerContourOrComponentPen, SimpleRecordingPointPen, ) from itertools import cycle from functools import wraps from io import BytesIO import cairo import math import os import logging log = logging.getLogger("fontTools.varLib.interpolatable") class OverridingDict(dict): def __init__(self, parent_dict): self.parent_dict = parent_dict def __missing__(self, key): return self.parent_dict[key] class InterpolatablePlot: width = 8.5 * 72 height = 11 * 72 pad = 0.1 * 72 title_font_size = 24 font_size = 16 page_number = 1 head_color = (0.3, 0.3, 0.3) label_color = (0.2, 0.2, 0.2) border_color = (0.9, 0.9, 0.9) border_width = 0.5 fill_color = (0.8, 0.8, 0.8) stroke_color = (0.1, 0.1, 0.1) stroke_width = 1 oncurve_node_color = (0, 0.8, 0, 0.7) oncurve_node_diameter = 6 offcurve_node_color = (0, 0.5, 0, 0.7) offcurve_node_diameter = 4 handle_color = (0, 0.5, 0, 0.7) handle_width = 0.5 corrected_start_point_color = (0, 0.9, 0, 0.7) corrected_start_point_size = 7 wrong_start_point_color = (1, 0, 0, 0.7) start_point_color = (0, 0, 1, 0.7) start_arrow_length = 9 kink_point_size = 7 kink_point_color = (1, 0, 1, 0.7) kink_circle_size = 15 kink_circle_stroke_width = 1 kink_circle_color = (1, 0, 1, 0.7) contour_colors = ((1, 0, 0), (0, 0, 1), (0, 1, 0), (1, 1, 0), (1, 0, 1), (0, 1, 1)) contour_alpha = 0.5 weight_issue_contour_color = (0, 0, 0, 0.4) no_issues_label = "Your font's good! Have a cupcake..." no_issues_label_color = (0, 0.5, 0) cupcake_color = (0.3, 0, 0.3) cupcake = r""" ,@. ,@.@@,. ,@@,.@@@. @.@@@,. ,@@. @@@. @@. @@,. ,@@@.@,.@. @. @@@@,.@.@@,. ,@@.@. @@.@@. @,. .@' @' @@, ,@@. @. .@@.@@@. @@' @, ,@. @@. @, @. @,@@,. , .@@, @,. .@,@@,. .@@,. , .@@, @, @, @. .@. @ @@,. , @ @,.@@. @,. @@,. @. @,. @' @@||@,. @'@,. @@,. @@ @,. @'@@, @' \\@@@@' @,. @'@@@@' @@,. @@@' //@@@' |||||||| @@,.
@@' ||||||| |@@@|@|| || \\\\\\\ ||@@@|| ||||||| ||||||| // ||||||| |||||| |||||| |||||| || \\\\\\ |||||| |||||| |||||| // |||||| ||||| ||||| ||||| || \\\\\ ||||| ||||| ||||| // ||||| |||| ||||| |||| || \\\\ |||| |||| |||| // |||||||||||||||||||||||| """ emoticon_color = (0, 0.3, 0.3) shrug = r"""\_(")_/""" underweight = r""" o /|\ / \ """ overweight = r""" o /O\ / \ """ yay = r""" \o/ """ def __init__(self, out, glyphsets, names=None, **kwargs): self.out = out self.glyphsets = glyphsets self.names = names or [repr(g) for g in glyphsets] self.toc = {} for k, v in kwargs.items(): if not hasattr(self, k): raise TypeError("Unknown keyword argument: %s" % k) setattr(self, k, v) self.panel_width = self.width / 2 - self.pad * 3 self.panel_height = ( self.height / 2 - self.pad * 6 - self.font_size * 2 - self.title_font_size ) def __enter__(self): return self def __exit__(self, type, value, traceback): pass def show_page(self): self.page_number += 1 def add_title_page( self, files, *, show_tolerance=True, tolerance=None, kinkiness=None ): pad = self.pad width = self.width - 3 * self.pad height = self.height - 2 * self.pad x = y = pad self.draw_label( "Problem report for:", x=x, y=y, bold=True, width=width, font_size=self.title_font_size, ) y += self.title_font_size import hashlib for file in files: base_file = os.path.basename(file) y += self.font_size + self.pad self.draw_label(base_file, x=x, y=y, bold=True, width=width) y += self.font_size + self.pad try: h = hashlib.sha1(open(file, "rb").read()).hexdigest() self.draw_label("sha1: %s" % h, x=x + pad, y=y, width=width) y += self.font_size except IsADirectoryError: pass if file.endswith(".ttf"): ttFont = TTFont(file) name = ttFont["name"] if "name" in ttFont else None if name: for what, nameIDs in ( ("Family name", (21, 16, 1)), ("Version", (5,)), ): n = name.getFirstDebugName(nameIDs) if n is None: continue self.draw_label( "%s: %s" % (what, n), x=x + pad, y=y, width=width ) y += self.font_size + self.pad elif file.endswith((".glyphs", ".glyphspackage")): from glyphsLib import GSFont f = GSFont(file) for what, field in ( ("Family name", "familyName"), ("VersionMajor", "versionMajor"), ("VersionMinor", "_versionMinor"), ): self.draw_label( "%s: %s" % (what, getattr(f, field)), x=x + pad, y=y, width=width, ) y += self.font_size + self.pad self.draw_legend( show_tolerance=show_tolerance, tolerance=tolerance, kinkiness=kinkiness ) self.show_page() def draw_legend(self, *, show_tolerance=True, tolerance=None, kinkiness=None): cr = cairo.Context(self.surface) x = self.pad y = self.height - self.pad - self.font_size * 2 width = self.width - 2 * self.pad xx = x + self.pad * 2 xxx = x + self.pad * 4 if show_tolerance: self.draw_label( "Tolerance: badness; closer to zero the worse", x=xxx, y=y, width=width ) y -= self.pad + self.font_size self.draw_label("Underweight contours", x=xxx, y=y, width=width) cr.rectangle(xx - self.pad * 0.7, y, 1.5 * self.pad, self.font_size) cr.set_source_rgb(*self.fill_color) cr.fill_preserve() if self.stroke_color: cr.set_source_rgb(*self.stroke_color) cr.set_line_width(self.stroke_width) cr.stroke_preserve() cr.set_source_rgba(*self.weight_issue_contour_color) cr.fill() y -= self.pad + self.font_size self.draw_label( "Colored contours: contours with the wrong order", x=xxx, y=y, width=width ) cr.rectangle(xx - self.pad * 0.7, y, 1.5 * self.pad, self.font_size) if self.fill_color: cr.set_source_rgb(*self.fill_color) cr.fill_preserve() if self.stroke_color: cr.set_source_rgb(*self.stroke_color) 
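# Illustrative note on the cairo calls around here: one rectangle path is
# painted in layers -- fill_preserve() lays down the background fill and
# keeps the path alive, stroke_preserve() draws the border and keeps it
# again, and a final translucent fill() overlays the issue color and
# consumes the path.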
cr.set_line_width(self.stroke_width) cr.stroke_preserve() cr.set_source_rgba(*self.contour_colors[0], self.contour_alpha) cr.fill() y -= self.pad + self.font_size self.draw_label("Kink artifact", x=xxx, y=y, width=width) self.draw_circle( cr, x=xx, y=y + self.font_size * 0.5, diameter=self.kink_circle_size, stroke_width=self.kink_circle_stroke_width, color=self.kink_circle_color, ) y -= self.pad + self.font_size self.draw_label("Point causing kink in the contour", x=xxx, y=y, width=width) self.draw_dot( cr, x=xx, y=y + self.font_size * 0.5, diameter=self.kink_point_size, color=self.kink_point_color, ) y -= self.pad + self.font_size self.draw_label("Suggested new contour start point", x=xxx, y=y, width=width) self.draw_dot( cr, x=xx, y=y + self.font_size * 0.5, diameter=self.corrected_start_point_size, color=self.corrected_start_point_color, ) y -= self.pad + self.font_size self.draw_label( "Contour start point in contours with wrong direction", x=xxx, y=y, width=width, ) self.draw_arrow( cr, x=xx - self.start_arrow_length * 0.3, y=y + self.font_size * 0.5, color=self.wrong_start_point_color, ) y -= self.pad + self.font_size self.draw_label( "Contour start point when the first two points overlap", x=xxx, y=y, width=width, ) self.draw_dot( cr, x=xx, y=y + self.font_size * 0.5, diameter=self.corrected_start_point_size, color=self.start_point_color, ) y -= self.pad + self.font_size self.draw_label("Contour start point and direction", x=xxx, y=y, width=width) self.draw_arrow( cr, x=xx - self.start_arrow_length * 0.3, y=y + self.font_size * 0.5, color=self.start_point_color, ) y -= self.pad + self.font_size self.draw_label("Legend:", x=x, y=y, width=width, bold=True) y -= self.pad + self.font_size if kinkiness is not None: self.draw_label( "Kink-reporting aggressiveness: %g" % kinkiness, x=xxx, y=y, width=width, ) y -= self.pad + self.font_size if tolerance is not None: self.draw_label( "Error tolerance: %g" % tolerance, x=xxx, y=y, width=width, ) y -= self.pad + self.font_size self.draw_label("Parameters:", x=x, y=y, width=width, bold=True) y -= self.pad + self.font_size def add_summary(self, problems): pad = self.pad width = self.width - 3 * self.pad height = self.height - 2 * self.pad x = y = pad self.draw_label( "Summary of problems", x=x, y=y, bold=True, width=width, font_size=self.title_font_size, ) y += self.title_font_size glyphs_per_problem = defaultdict(set) for glyphname, problems in sorted(problems.items()): for problem in problems: glyphs_per_problem[problem["type"]].add(glyphname) if "nothing" in glyphs_per_problem: del glyphs_per_problem["nothing"] for problem_type in sorted( glyphs_per_problem, key=lambda x: InterpolatableProblem.severity[x] ): y += self.font_size self.draw_label( "%s: %d" % (problem_type, len(glyphs_per_problem[problem_type])), x=x, y=y, width=width, bold=True, ) y += self.font_size for glyphname in sorted(glyphs_per_problem[problem_type]): if y + self.font_size > height: self.show_page() y = self.font_size + pad self.draw_label(glyphname, x=x + 2 * pad, y=y, width=width - 2 * pad) y += self.font_size self.show_page() def _add_listing(self, title, items): pad = self.pad width = self.width - 2 * self.pad height = self.height - 2 * self.pad x = y = pad self.draw_label( title, x=x, y=y, bold=True, width=width, font_size=self.title_font_size ) y += self.title_font_size + self.pad last_glyphname = None for page_no, (glyphname, problems) in items: if glyphname == last_glyphname: continue last_glyphname = glyphname if y + self.font_size > height: self.show_page() y = 
self.font_size + pad self.draw_label(glyphname, x=x + 5 * pad, y=y, width=width - 2 * pad) self.draw_label(str(page_no), x=x, y=y, width=4 * pad, align=1) y += self.font_size self.show_page() def add_table_of_contents(self): self._add_listing("Table of contents", sorted(self.toc.items())) def add_index(self): self._add_listing("Index", sorted(self.toc.items(), key=lambda x: x[1][0])) def add_problems(self, problems, *, show_tolerance=True, show_page_number=True): for glyph, glyph_problems in problems.items(): last_masters = None current_glyph_problems = [] for p in glyph_problems: masters = ( p["master_idx"] if "master_idx" in p else (p["master_1_idx"], p["master_2_idx"]) ) if masters == last_masters: current_glyph_problems.append(p) continue # Flush if current_glyph_problems: self.add_problem( glyph, current_glyph_problems, show_tolerance=show_tolerance, show_page_number=show_page_number, ) self.show_page() current_glyph_problems = [] last_masters = masters current_glyph_problems.append(p) if current_glyph_problems: self.add_problem( glyph, current_glyph_problems, show_tolerance=show_tolerance, show_page_number=show_page_number, ) self.show_page() def add_problem( self, glyphname, problems, *, show_tolerance=True, show_page_number=True ): if type(problems) not in (list, tuple): problems = [problems] self.toc[self.page_number] = (glyphname, problems) problem_type = problems[0]["type"] problem_types = set(problem["type"] for problem in problems) if not all(pt == problem_type for pt in problem_types): problem_type = ", ".join(sorted({problem["type"] for problem in problems})) log.info("Drawing %s: %s", glyphname, problem_type) master_keys = ( ("master_idx",) if "master_idx" in problems[0] else ("master_1_idx", "master_2_idx") ) master_indices = [problems[0][k] for k in master_keys] if problem_type == InterpolatableProblem.MISSING: sample_glyph = next( i for i, m in enumerate(self.glyphsets) if m[glyphname] is not None ) master_indices.insert(0, sample_glyph) x = self.pad y = self.pad self.draw_label( "Glyph name: " + glyphname, x=x, y=y, color=self.head_color, align=0, bold=True, font_size=self.title_font_size, ) tolerance = min(p.get("tolerance", 1) for p in problems) if tolerance < 1 and show_tolerance: self.draw_label( "tolerance: %.2f" % tolerance, x=x, y=y, width=self.width - 2 * self.pad, align=1, bold=True, ) y += self.title_font_size + self.pad self.draw_label( "Problems: " + problem_type, x=x, y=y, width=self.width - 2 * self.pad, color=self.head_color, bold=True, ) y += self.font_size + self.pad * 2 scales = [] for which, master_idx in enumerate(master_indices): glyphset = self.glyphsets[master_idx] name = self.names[master_idx] self.draw_label( name, x=x, y=y, color=self.label_color, width=self.panel_width, align=0.5, ) y += self.font_size + self.pad if glyphset[glyphname] is not None: scales.append( self.draw_glyph(glyphset, glyphname, problems, which, x=x, y=y) ) else: self.draw_emoticon(self.shrug, x=x, y=y) y += self.panel_height + self.font_size + self.pad if any( pt in ( InterpolatableProblem.NOTHING, InterpolatableProblem.WRONG_START_POINT, InterpolatableProblem.CONTOUR_ORDER, InterpolatableProblem.KINK, InterpolatableProblem.UNDERWEIGHT, InterpolatableProblem.OVERWEIGHT, ) for pt in problem_types ): x = self.pad + self.panel_width + self.pad y = self.pad y += self.title_font_size + self.pad * 2 y += self.font_size + self.pad glyphset1 = self.glyphsets[master_indices[0]] glyphset2 = self.glyphsets[master_indices[1]] # Draw the mid-way of the two masters self.draw_label( 
"midway interpolation", x=x, y=y, color=self.head_color, width=self.panel_width, align=0.5, ) y += self.font_size + self.pad midway_glyphset = LerpGlyphSet(glyphset1, glyphset2) self.draw_glyph( midway_glyphset, glyphname, [{"type": "midway"}] + [ p for p in problems if p["type"] in ( InterpolatableProblem.KINK, InterpolatableProblem.UNDERWEIGHT, InterpolatableProblem.OVERWEIGHT, ) ], None, x=x, y=y, scale=min(scales), ) y += self.panel_height + self.font_size + self.pad if any( pt in ( InterpolatableProblem.WRONG_START_POINT, InterpolatableProblem.CONTOUR_ORDER, InterpolatableProblem.KINK, ) for pt in problem_types ): # Draw the proposed fix self.draw_label( "proposed fix", x=x, y=y, color=self.head_color, width=self.panel_width, align=0.5, ) y += self.font_size + self.pad overriding1 = OverridingDict(glyphset1) overriding2 = OverridingDict(glyphset2) perContourPen1 = PerContourOrComponentPen( RecordingPen, glyphset=overriding1 ) perContourPen2 = PerContourOrComponentPen( RecordingPen, glyphset=overriding2 ) glyphset1[glyphname].draw(perContourPen1) glyphset2[glyphname].draw(perContourPen2) for problem in problems: if problem["type"] == InterpolatableProblem.CONTOUR_ORDER: fixed_contours = [ perContourPen2.value[i] for i in problems[0]["value_2"] ] perContourPen2.value = fixed_contours for problem in problems: if problem["type"] == InterpolatableProblem.WRONG_START_POINT: # Save the wrong contours wrongContour1 = perContourPen1.value[problem["contour"]] wrongContour2 = perContourPen2.value[problem["contour"]] # Convert the wrong contours to point pens points1 = RecordingPointPen() converter = SegmentToPointPen(points1, False) wrongContour1.replay(converter) points2 = RecordingPointPen() converter = SegmentToPointPen(points2, False) wrongContour2.replay(converter) proposed_start = problem["value_2"] # See if we need reversing; fragile but worth a try if problem["reversed"]: new_points2 = RecordingPointPen() reversedPen = ReverseContourPointPen(new_points2) points2.replay(reversedPen) points2 = new_points2 proposed_start = len(points2.value) - 2 - proposed_start # Rotate points2 so that the first point is the same as in points1 beginPath = points2.value[:1] endPath = points2.value[-1:] pts = points2.value[1:-1] pts = pts[proposed_start:] + pts[:proposed_start] points2.value = beginPath + pts + endPath # Convert the point pens back to segment pens segment1 = RecordingPen() converter = PointToSegmentPen(segment1, True) points1.replay(converter) segment2 = RecordingPen() converter = PointToSegmentPen(segment2, True) points2.replay(converter) # Replace the wrong contours wrongContour1.value = segment1.value wrongContour2.value = segment2.value perContourPen1.value[problem["contour"]] = wrongContour1 perContourPen2.value[problem["contour"]] = wrongContour2 for problem in problems: # If we have a kink, try to fix it. 
if problem["type"] == InterpolatableProblem.KINK: # Save the wrong contours wrongContour1 = perContourPen1.value[problem["contour"]] wrongContour2 = perContourPen2.value[problem["contour"]] # Convert the wrong contours to point pens points1 = RecordingPointPen() converter = SegmentToPointPen(points1, False) wrongContour1.replay(converter) points2 = RecordingPointPen() converter = SegmentToPointPen(points2, False) wrongContour2.replay(converter) i = problem["value"] # Position points to be around the same ratio # beginPath / endPath dance j = i + 1 pt0 = points1.value[j][1][0] pt1 = points2.value[j][1][0] j_prev = (i - 1) % (len(points1.value) - 2) + 1 pt0_prev = points1.value[j_prev][1][0] pt1_prev = points2.value[j_prev][1][0] j_next = (i + 1) % (len(points1.value) - 2) + 1 pt0_next = points1.value[j_next][1][0] pt1_next = points2.value[j_next][1][0] pt0 = complex(*pt0) pt1 = complex(*pt1) pt0_prev = complex(*pt0_prev) pt1_prev = complex(*pt1_prev) pt0_next = complex(*pt0_next) pt1_next = complex(*pt1_next) # Find the ratio of the distance between the points r0 = abs(pt0 - pt0_prev) / abs(pt0_next - pt0_prev) r1 = abs(pt1 - pt1_prev) / abs(pt1_next - pt1_prev) r_mid = (r0 + r1) / 2 pt0 = pt0_prev + r_mid * (pt0_next - pt0_prev) pt1 = pt1_prev + r_mid * (pt1_next - pt1_prev) points1.value[j] = ( points1.value[j][0], (((pt0.real, pt0.imag),) + points1.value[j][1][1:]), points1.value[j][2], ) points2.value[j] = ( points2.value[j][0], (((pt1.real, pt1.imag),) + points2.value[j][1][1:]), points2.value[j][2], ) # Convert the point pens back to segment pens segment1 = RecordingPen() converter = PointToSegmentPen(segment1, True) points1.replay(converter) segment2 = RecordingPen() converter = PointToSegmentPen(segment2, True) points2.replay(converter) # Replace the wrong contours wrongContour1.value = segment1.value wrongContour2.value = segment2.value # Assemble fixed1 = RecordingPen() fixed2 = RecordingPen() for contour in perContourPen1.value: fixed1.value.extend(contour.value) for contour in perContourPen2.value: fixed2.value.extend(contour.value) fixed1.draw = fixed1.replay fixed2.draw = fixed2.replay overriding1[glyphname] = fixed1 overriding2[glyphname] = fixed2 try: midway_glyphset = LerpGlyphSet(overriding1, overriding2) self.draw_glyph( midway_glyphset, glyphname, {"type": "fixed"}, None, x=x, y=y, scale=min(scales), ) except ValueError: self.draw_emoticon(self.shrug, x=x, y=y) y += self.panel_height + self.pad else: emoticon = self.shrug if InterpolatableProblem.UNDERWEIGHT in problem_types: emoticon = self.underweight elif InterpolatableProblem.OVERWEIGHT in problem_types: emoticon = self.overweight elif InterpolatableProblem.NOTHING in problem_types: emoticon = self.yay self.draw_emoticon(emoticon, x=x, y=y) if show_page_number: self.draw_label( str(self.page_number), x=0, y=self.height - self.font_size - self.pad, width=self.width, color=self.head_color, align=0.5, ) def draw_label( self, label, *, x=0, y=0, color=(0, 0, 0), align=0, bold=False, width=None, height=None, font_size=None, ): if width is None: width = self.width if height is None: height = self.height if font_size is None: font_size = self.font_size cr = cairo.Context(self.surface) cr.select_font_face( "@cairo:", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD if bold else cairo.FONT_WEIGHT_NORMAL, ) cr.set_font_size(font_size) font_extents = cr.font_extents() font_size = font_size * font_size / font_extents[2] cr.set_font_size(font_size) font_extents = cr.font_extents() cr.set_source_rgb(*color) extents = 
cr.text_extents(label) if extents.width > width: # Shrink font_size *= width / extents.width cr.set_font_size(font_size) font_extents = cr.font_extents() extents = cr.text_extents(label) # Center label_x = x + (width - extents.width) * align label_y = y + font_extents[0] cr.move_to(label_x, label_y) cr.show_text(label) def draw_glyph(self, glyphset, glyphname, problems, which, *, x=0, y=0, scale=None): if type(problems) not in (list, tuple): problems = [problems] midway = any(problem["type"] == "midway" for problem in problems) problem_type = problems[0]["type"] problem_types = set(problem["type"] for problem in problems) if not all(pt == problem_type for pt in problem_types): problem_type = "mixed" glyph = glyphset[glyphname] recording = RecordingPen() glyph.draw(recording) decomposedRecording = DecomposingRecordingPen(glyphset) glyph.draw(decomposedRecording) boundsPen = ControlBoundsPen(glyphset) decomposedRecording.replay(boundsPen) bounds = boundsPen.bounds if bounds is None: bounds = (0, 0, 0, 0) glyph_width = bounds[2] - bounds[0] glyph_height = bounds[3] - bounds[1] if glyph_width: if scale is None: scale = self.panel_width / glyph_width else: scale = min(scale, self.panel_height / glyph_height) if glyph_height: if scale is None: scale = self.panel_height / glyph_height else: scale = min(scale, self.panel_height / glyph_height) if scale is None: scale = 1 cr = cairo.Context(self.surface) cr.translate(x, y) # Center cr.translate( (self.panel_width - glyph_width * scale) / 2, (self.panel_height - glyph_height * scale) / 2, ) cr.scale(scale, -scale) cr.translate(-bounds[0], -bounds[3]) if self.border_color: cr.set_source_rgb(*self.border_color) cr.rectangle(bounds[0], bounds[1], glyph_width, glyph_height) cr.set_line_width(self.border_width / scale) cr.stroke() if self.fill_color or self.stroke_color: pen = CairoPen(glyphset, cr) decomposedRecording.replay(pen) if self.fill_color and problem_type != InterpolatableProblem.OPEN_PATH: cr.set_source_rgb(*self.fill_color) cr.fill_preserve() if self.stroke_color: cr.set_source_rgb(*self.stroke_color) cr.set_line_width(self.stroke_width / scale) cr.stroke_preserve() cr.new_path() if ( InterpolatableProblem.UNDERWEIGHT in problem_types or InterpolatableProblem.OVERWEIGHT in problem_types ): perContourPen = PerContourOrComponentPen(RecordingPen, glyphset=glyphset) recording.replay(perContourPen) for problem in problems: if problem["type"] in ( InterpolatableProblem.UNDERWEIGHT, InterpolatableProblem.OVERWEIGHT, ): contour = perContourPen.value[problem["contour"]] contour.replay(CairoPen(glyphset, cr)) cr.set_source_rgba(*self.weight_issue_contour_color) cr.fill() if any( t in problem_types for t in { InterpolatableProblem.NOTHING, InterpolatableProblem.NODE_COUNT, InterpolatableProblem.NODE_INCOMPATIBILITY, } ): cr.set_line_cap(cairo.LINE_CAP_ROUND) # Oncurve nodes for segment, args in decomposedRecording.value: if not args: continue x, y = args[-1] cr.move_to(x, y) cr.line_to(x, y) cr.set_source_rgba(*self.oncurve_node_color) cr.set_line_width(self.oncurve_node_diameter / scale) cr.stroke() # Offcurve nodes for segment, args in decomposedRecording.value: if not args: continue for x, y in args[:-1]: cr.move_to(x, y) cr.line_to(x, y) cr.set_source_rgba(*self.offcurve_node_color) cr.set_line_width(self.offcurve_node_diameter / scale) cr.stroke() # Handles for segment, args in decomposedRecording.value: if not args: pass elif segment in ("moveTo", "lineTo"): cr.move_to(*args[0]) elif segment == "qCurveTo": for x, y in args: cr.line_to(x, y) 
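# Illustrative note: for a qCurveTo the handle polyline above runs through
# every off-curve point in order; new_sub_path() below lifts the pen
# before jumping to the segment's on-curve end, so the jump itself is not
# stroked. A curveTo instead gets two separate handles, start->cp1 and
# cp2->end.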
cr.new_sub_path() cr.move_to(*args[-1]) elif segment == "curveTo": cr.line_to(*args[0]) cr.new_sub_path() cr.move_to(*args[1]) cr.line_to(*args[2]) cr.new_sub_path() cr.move_to(*args[-1]) else: continue cr.set_source_rgba(*self.handle_color) cr.set_line_width(self.handle_width / scale) cr.stroke() matching = None for problem in problems: if problem["type"] == InterpolatableProblem.CONTOUR_ORDER: matching = problem["value_2"] colors = cycle(self.contour_colors) perContourPen = PerContourOrComponentPen( RecordingPen, glyphset=glyphset ) recording.replay(perContourPen) for i, contour in enumerate(perContourPen.value): if matching[i] == i: continue color = next(colors) contour.replay(CairoPen(glyphset, cr)) cr.set_source_rgba(*color, self.contour_alpha) cr.fill() for problem in problems: if problem["type"] in ( InterpolatableProblem.NOTHING, InterpolatableProblem.WRONG_START_POINT, ): idx = problem.get("contour") # Draw suggested point if idx is not None and which == 1 and "value_2" in problem: perContourPen = PerContourOrComponentPen( RecordingPen, glyphset=glyphset ) decomposedRecording.replay(perContourPen) points = SimpleRecordingPointPen() converter = SegmentToPointPen(points, False) perContourPen.value[ idx if matching is None else matching[idx] ].replay(converter) targetPoint = points.value[problem["value_2"]][0] cr.save() cr.translate(*targetPoint) cr.scale(1 / scale, 1 / scale) self.draw_dot( cr, diameter=self.corrected_start_point_size, color=self.corrected_start_point_color, ) cr.restore() # Draw start-point arrow if which == 0 or not problem.get("reversed"): color = self.start_point_color else: color = self.wrong_start_point_color first_pt = None i = 0 cr.save() for segment, args in decomposedRecording.value: if segment == "moveTo": first_pt = args[0] continue if first_pt is None: continue if segment == "closePath": second_pt = first_pt else: second_pt = args[0] if idx is None or i == idx: cr.save() first_pt = complex(*first_pt) second_pt = complex(*second_pt) length = abs(second_pt - first_pt) cr.translate(first_pt.real, first_pt.imag) if length: # Draw arrowhead cr.rotate( math.atan2( second_pt.imag - first_pt.imag, second_pt.real - first_pt.real, ) ) cr.scale(1 / scale, 1 / scale) self.draw_arrow(cr, color=color) else: # Draw circle cr.scale(1 / scale, 1 / scale) self.draw_dot( cr, diameter=self.corrected_start_point_size, color=color, ) cr.restore() if idx is not None: break first_pt = None i += 1 cr.restore() if problem["type"] == InterpolatableProblem.KINK: idx = problem.get("contour") perContourPen = PerContourOrComponentPen( RecordingPen, glyphset=glyphset ) decomposedRecording.replay(perContourPen) points = SimpleRecordingPointPen() converter = SegmentToPointPen(points, False) perContourPen.value[idx if matching is None else matching[idx]].replay( converter ) targetPoint = points.value[problem["value"]][0] cr.save() cr.translate(*targetPoint) cr.scale(1 / scale, 1 / scale) if midway: self.draw_circle( cr, diameter=self.kink_circle_size, stroke_width=self.kink_circle_stroke_width, color=self.kink_circle_color, ) else: self.draw_dot( cr, diameter=self.kink_point_size, color=self.kink_point_color, ) cr.restore() return scale def draw_dot(self, cr, *, x=0, y=0, color=(0, 0, 0), diameter=10): cr.save() cr.set_line_width(diameter) cr.set_line_cap(cairo.LINE_CAP_ROUND) cr.move_to(x, y) cr.line_to(x, y) if len(color) == 3: color = color + (1,) cr.set_source_rgba(*color) cr.stroke() cr.restore() def draw_circle( self, cr, *, x=0, y=0, color=(0, 0, 0), diameter=10, stroke_width=1 ): 
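# Illustrative note: draw_dot above renders its dot as a zero-length line
# (move_to and line_to at the same point) with LINE_CAP_ROUND, so the
# visible dot is really the round cap and its diameter is controlled via
# set_line_width. draw_circle below strokes an actual arc outline instead.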
cr.save() cr.set_line_width(stroke_width) cr.set_line_cap(cairo.LINE_CAP_SQUARE) cr.arc(x, y, diameter / 2, 0, 2 * math.pi) if len(color) == 3: color = color + (1,) cr.set_source_rgba(*color) cr.stroke() cr.restore() def draw_arrow(self, cr, *, x=0, y=0, color=(0, 0, 0)): cr.save() if len(color) == 3: color = color + (1,) cr.set_source_rgba(*color) cr.translate(self.start_arrow_length + x, y) cr.move_to(0, 0) cr.line_to( -self.start_arrow_length, -self.start_arrow_length * 0.4, ) cr.line_to( -self.start_arrow_length, self.start_arrow_length * 0.4, ) cr.close_path() cr.fill() cr.restore() def draw_text(self, text, *, x=0, y=0, color=(0, 0, 0), width=None, height=None): if width is None: width = self.width if height is None: height = self.height text = text.splitlines() cr = cairo.Context(self.surface) cr.set_source_rgb(*color) cr.set_font_size(self.font_size) cr.select_font_face( "@cairo:monospace", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL ) text_width = 0 text_height = 0 font_extents = cr.font_extents() font_font_size = font_extents[2] font_ascent = font_extents[0] for line in text: extents = cr.text_extents(line) text_width = max(text_width, extents.x_advance) text_height += font_font_size if not text_width: return cr.translate(x, y) scale = min(width / text_width, height / text_height) # center cr.translate( (width - text_width * scale) / 2, (height - text_height * scale) / 2 ) cr.scale(scale, scale) cr.translate(0, font_ascent) for line in text: cr.move_to(0, 0) cr.show_text(line) cr.translate(0, font_font_size) def draw_cupcake(self): self.draw_label( self.no_issues_label, x=self.pad, y=self.pad, color=self.no_issues_label_color, width=self.width - 2 * self.pad, align=0.5, bold=True, font_size=self.title_font_size, ) self.draw_text( self.cupcake, x=self.pad, y=self.pad + self.font_size, width=self.width - 2 * self.pad, height=self.height - 2 * self.pad - self.font_size, color=self.cupcake_color, ) def draw_emoticon(self, emoticon, x=0, y=0): self.draw_text( emoticon, x=x, y=y, color=self.emoticon_color, width=self.panel_width, height=self.panel_height, ) class InterpolatablePostscriptLike(InterpolatablePlot): def __exit__(self, type, value, traceback): self.surface.finish() def show_page(self): super().show_page() self.surface.show_page() class InterpolatablePS(InterpolatablePostscriptLike): def __enter__(self): self.surface = cairo.PSSurface(self.out, self.width, self.height) return self class InterpolatablePDF(InterpolatablePostscriptLike): def __enter__(self): self.surface = cairo.PDFSurface(self.out, self.width, self.height) self.surface.set_metadata( cairo.PDF_METADATA_CREATOR, "fonttools varLib.interpolatable" ) self.surface.set_metadata(cairo.PDF_METADATA_CREATE_DATE, "") return self class InterpolatableSVG(InterpolatablePlot): def __enter__(self): self.sink = BytesIO() self.surface = cairo.SVGSurface(self.sink, self.width, self.height) return self def __exit__(self, type, value, traceback): if self.surface is not None: self.show_page() def show_page(self): super().show_page() self.surface.finish() self.out.append(self.sink.getvalue()) self.sink = BytesIO() self.surface = cairo.SVGSurface(self.sink, self.width, self.height) PKaZZZ��<� � 2fontTools/varLib/interpolatableTestContourOrder.pyfrom .interpolatableHelpers import * import logging log = logging.getLogger("fontTools.varLib.interpolatable") def test_contour_order(glyph0, glyph1): # We try matching both the StatisticsControlPen vector # and the StatisticsPen vector. 
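    # (Illustrative aside: matching_for_vectors, from interpolatableHelpers,
    # is assumed here to return the best contour pairing together with its
    # cost and the cost of the identity pairing -- conceptually an
    # assignment problem over a cost matrix
    # cost[i][j] = distance(m0[i], m1[j]), solvable e.g. with
    # scipy.optimize.linear_sum_assignment.)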
    #
    # If either method found an identity matching, accept it.
    # This is crucial for fonts like Kablammo[MORF].ttf and
    # Nabla[EDPT,EHLT].ttf, since they really confuse the
    # StatisticsPen vector because of their area=0 contours.

    n = len(glyph0.controlVectors)

    matching = None
    matching_cost = 0
    identity_cost = 0

    done = n <= 1
    if not done:
        m0Control = glyph0.controlVectors
        m1Control = glyph1.controlVectors
        (
            matching_control,
            matching_cost_control,
            identity_cost_control,
        ) = matching_for_vectors(m0Control, m1Control)
        done = matching_cost_control == identity_cost_control
    if not done:
        m0Green = glyph0.greenVectors
        m1Green = glyph1.greenVectors
        (
            matching_green,
            matching_cost_green,
            identity_cost_green,
        ) = matching_for_vectors(m0Green, m1Green)
        done = matching_cost_green == identity_cost_green

    if not done:
        # See if reversing contours in one master helps.
        # That's a common problem. Then the wrong_start_point
        # test will fix them.
        #
        # Reverse the sign of the area (0); the rest stay the same.
        if not done:
            m1ControlReversed = [(-m[0],) + m[1:] for m in m1Control]
            (
                matching_control_reversed,
                matching_cost_control_reversed,
                identity_cost_control_reversed,
            ) = matching_for_vectors(m0Control, m1ControlReversed)
            done = matching_cost_control_reversed == identity_cost_control_reversed
        if not done:
            m1GreenReversed = [(-m[0],) + m[1:] for m in m1Green]
            (
                matching_green_reversed,
                matching_cost_green_reversed,
                identity_cost_green_reversed,
            ) = matching_for_vectors(m0Green, m1GreenReversed)
            done = matching_cost_green_reversed == identity_cost_green_reversed

    if not done:
        # Otherwise, use the worst of the two matchings.
        if (
            matching_cost_control / identity_cost_control
            < matching_cost_green / identity_cost_green
        ):
            matching = matching_control
            matching_cost = matching_cost_control
            identity_cost = identity_cost_control
        else:
            matching = matching_green
            matching_cost = matching_cost_green
            identity_cost = identity_cost_green

    this_tolerance = matching_cost / identity_cost if identity_cost else 1
    log.debug(
        "test-contour-order: tolerance %g",
        this_tolerance,
    )
    return this_tolerance, matching
PKaZZZ�.�z��3fontTools/varLib/interpolatableTestStartingPoint.py
from .interpolatableHelpers import *


def test_starting_point(glyph0, glyph1, ix, tolerance, matching):
    if matching is None:
        matching = list(range(len(glyph0.isomorphisms)))
    contour0 = glyph0.isomorphisms[ix]
    contour1 = glyph1.isomorphisms[matching[ix]]

    m0Vectors = glyph0.greenVectors
    m1Vectors = [glyph1.greenVectors[i] for i in matching]

    c0 = contour0[0]
    # Next few lines duplicated below.
    costs = [vdiff_hypot2_complex(c0[0], c1[0]) for c1 in contour1]
    min_cost_idx, min_cost = min(enumerate(costs), key=lambda x: x[1])
    first_cost = costs[0]
    proposed_point = contour1[min_cost_idx][1]
    reverse = contour1[min_cost_idx][2]

    if min_cost < first_cost * tolerance:
        # c0 is the first isomorphism of the m0 master
        # contour1 is list of all isomorphisms of the m1 master
        #
        # If the two shapes are both circle-ish and slightly
        # rotated, we detect wrong start point. This is for
        # example the case hundreds of times in
        # RobotoSerif-Italic[GRAD,opsz,wdth,wght].ttf
        #
        # If the proposed point is only one off from the first
        # point (and not reversed), try harder:
        #
        # Find the major eigenvector of the covariance matrix,
        # and rotate the contours by that angle. Then find the
        # closest point again. If it matches this time, let it
        # pass.
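        # (Illustrative aside: vdiff_hypot2_complex is assumed to be a
        # squared-distance cost between two equal-length sequences of
        # complex points, roughly sum(abs(a - b) ** 2 for a, b in
        # zip(v0, v1)); each candidate start-point rotation permutes the
        # sequence, hence one cost per isomorphism above.)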
num_points = len(glyph1.points[ix]) leeway = 3 if not reverse and ( proposed_point <= leeway or proposed_point >= num_points - leeway ): # Try harder # Recover the covariance matrix from the GreenVectors. # This is a 2x2 matrix. transforms = [] for vector in (m0Vectors[ix], m1Vectors[ix]): meanX = vector[1] meanY = vector[2] stddevX = vector[3] * 0.5 stddevY = vector[4] * 0.5 correlation = vector[5] / abs(vector[0]) # https://cookierobotics.com/007/ a = stddevX * stddevX # VarianceX c = stddevY * stddevY # VarianceY b = correlation * stddevX * stddevY # Covariance delta = (((a - c) * 0.5) ** 2 + b * b) ** 0.5 lambda1 = (a + c) * 0.5 + delta # Major eigenvalue lambda2 = (a + c) * 0.5 - delta # Minor eigenvalue theta = atan2(lambda1 - a, b) if b != 0 else (pi * 0.5 if a < c else 0) trans = Transform() # Don't translate here. We are working on the complex-vector # that includes more than just the points. It's horrible what # we are doing anyway... # trans = trans.translate(meanX, meanY) trans = trans.rotate(theta) trans = trans.scale(sqrt(lambda1), sqrt(lambda2)) transforms.append(trans) trans = transforms[0] new_c0 = ( [complex(*trans.transformPoint((pt.real, pt.imag))) for pt in c0[0]], ) + c0[1:] trans = transforms[1] new_contour1 = [] for c1 in contour1: new_c1 = ( [ complex(*trans.transformPoint((pt.real, pt.imag))) for pt in c1[0] ], ) + c1[1:] new_contour1.append(new_c1) # Next few lines duplicate from above. costs = [ vdiff_hypot2_complex(new_c0[0], new_c1[0]) for new_c1 in new_contour1 ] min_cost_idx, min_cost = min(enumerate(costs), key=lambda x: x[1]) first_cost = costs[0] if min_cost < first_cost * tolerance: # Don't report this # min_cost = first_cost # reverse = False # proposed_point = 0 # new_contour1[min_cost_idx][1] pass this_tolerance = min_cost / first_cost if first_cost else 1 log.debug( "test-starting-point: tolerance %g", this_tolerance, ) return this_tolerance, proposed_point, reverse PKaZZZ.��ii&fontTools/varLib/interpolate_layout.py""" Interpolate OpenType Layout tables (GDEF / GPOS / GSUB). """ from fontTools.ttLib import TTFont from fontTools.varLib import models, VarLibError, load_designspace, load_masters from fontTools.varLib.merger import InstancerMerger import os.path import logging from copy import deepcopy from pprint import pformat log = logging.getLogger("fontTools.varLib.interpolate_layout") def interpolate_layout(designspace, loc, master_finder=lambda s: s, mapped=False): """ Interpolate GPOS from a designspace file and location. If master_finder is set, it should be a callable that takes master filename as found in designspace file and map it to master font binary as to be opened (eg. .ttf or .otf). If mapped is False (default), then location is mapped using the map element of the axes in designspace file. If mapped is True, it is assumed that location is in designspace's internal space and no mapping is performed. 
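    Example (illustrative; file names are hypothetical):

        from fontTools.varLib.interpolate_layout import interpolate_layout

        font = interpolate_layout("MyFamily.designspace", {"wght": 400})
        font.save("MyFamily-GPOS-wght400.ttf")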
""" if hasattr(designspace, "sources"): # Assume a DesignspaceDocument pass else: # Assume a file path from fontTools.designspaceLib import DesignSpaceDocument designspace = DesignSpaceDocument.fromfile(designspace) ds = load_designspace(designspace) log.info("Building interpolated font") log.info("Loading master fonts") master_fonts = load_masters(designspace, master_finder) font = deepcopy(master_fonts[ds.base_idx]) log.info("Location: %s", pformat(loc)) if not mapped: loc = {name: ds.axes[name].map_forward(v) for name, v in loc.items()} log.info("Internal location: %s", pformat(loc)) loc = models.normalizeLocation(loc, ds.internal_axis_supports) log.info("Normalized location: %s", pformat(loc)) # Assume single-model for now. model = models.VariationModel(ds.normalized_master_locs) assert 0 == model.mapping[ds.base_idx] merger = InstancerMerger(font, model, loc) log.info("Building interpolated tables") # TODO GSUB/GDEF merger.mergeTables(font, master_fonts, ["GPOS"]) return font def main(args=None): """Interpolate GDEF/GPOS/GSUB tables for a point on a designspace""" from fontTools import configLogger import argparse import sys parser = argparse.ArgumentParser( "fonttools varLib.interpolate_layout", description=main.__doc__, ) parser.add_argument( "designspace_filename", metavar="DESIGNSPACE", help="Input TTF files" ) parser.add_argument( "locations", metavar="LOCATION", type=str, nargs="+", help="Axis locations (e.g. wdth=120", ) parser.add_argument( "-o", "--output", metavar="OUTPUT", help="Output font file (defaults to <designspacename>-instance.ttf)", ) parser.add_argument( "-l", "--loglevel", metavar="LEVEL", default="INFO", help="Logging level (defaults to INFO)", ) args = parser.parse_args(args) if not args.output: args.output = os.path.splitext(args.designspace_filename)[0] + "-instance.ttf" configLogger(level=args.loglevel) finder = lambda s: s.replace("master_ufo", "master_ttf_interpolatable").replace( ".ufo", ".ttf" ) loc = {} for arg in args.locations: tag, val = arg.split("=") loc[tag] = float(val) font = interpolate_layout(args.designspace_filename, loc, finder) log.info("Saving font %s", args.output) font.save(args.output) if __name__ == "__main__": import sys if len(sys.argv) > 1: sys.exit(main()) import doctest sys.exit(doctest.testmod().failed) PKaZZZ���S9S9fontTools/varLib/iup.pytry: import cython COMPILED = cython.compiled except (AttributeError, ImportError): # if cython not installed, use mock module with no-op decorators and types from fontTools.misc import cython COMPILED = False from typing import ( Sequence, Tuple, Union, ) from numbers import Integral, Real _Point = Tuple[Real, Real] _Delta = Tuple[Real, Real] _PointSegment = Sequence[_Point] _DeltaSegment = Sequence[_Delta] _DeltaOrNone = Union[_Delta, None] _DeltaOrNoneSegment = Sequence[_DeltaOrNone] _Endpoints = Sequence[Integral] MAX_LOOKBACK = 8 @cython.cfunc @cython.locals( j=cython.int, n=cython.int, x1=cython.double, x2=cython.double, d1=cython.double, d2=cython.double, scale=cython.double, x=cython.double, d=cython.double, ) def iup_segment( coords: _PointSegment, rc1: _Point, rd1: _Delta, rc2: _Point, rd2: _Delta ): # -> _DeltaSegment: """Given two reference coordinates `rc1` & `rc2` and their respective delta vectors `rd1` & `rd2`, returns interpolated deltas for the set of coordinates `coords`.""" # rc1 = reference coord 1 # rd1 = reference delta 1 out_arrays = [None, None] for j in 0, 1: out_arrays[j] = out = [] x1, x2, d1, d2 = rc1[j], rc2[j], rd1[j], rd2[j] if x1 == x2: n = len(coords) if d1 == 
d2: out.extend([d1] * n) else: out.extend([0] * n) continue if x1 > x2: x1, x2 = x2, x1 d1, d2 = d2, d1 # x1 < x2 scale = (d2 - d1) / (x2 - x1) for pair in coords: x = pair[j] if x <= x1: d = d1 elif x >= x2: d = d2 else: # Interpolate d = d1 + (x - x1) * scale out.append(d) return zip(*out_arrays) def iup_contour(deltas: _DeltaOrNoneSegment, coords: _PointSegment) -> _DeltaSegment: """For the contour given in `coords`, interpolate any missing delta values in delta vector `deltas`. Returns fully filled-out delta vector.""" assert len(deltas) == len(coords) if None not in deltas: return deltas n = len(deltas) # indices of points with explicit deltas indices = [i for i, v in enumerate(deltas) if v is not None] if not indices: # All deltas are None. Return 0,0 for all. return [(0, 0)] * n out = [] it = iter(indices) start = next(it) if start != 0: # Initial segment that wraps around i1, i2, ri1, ri2 = 0, start, start, indices[-1] out.extend( iup_segment( coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2] ) ) out.append(deltas[start]) for end in it: if end - start > 1: i1, i2, ri1, ri2 = start + 1, end, start, end out.extend( iup_segment( coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2] ) ) out.append(deltas[end]) start = end if start != n - 1: # Final segment that wraps around i1, i2, ri1, ri2 = start + 1, n, start, indices[0] out.extend( iup_segment( coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2] ) ) assert len(deltas) == len(out), (len(deltas), len(out)) return out def iup_delta( deltas: _DeltaOrNoneSegment, coords: _PointSegment, ends: _Endpoints ) -> _DeltaSegment: """For the outline given in `coords`, with contour endpoints given in sorted increasing order in `ends`, interpolate any missing delta values in delta vector `deltas`. Returns fully filled-out delta vector.""" assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4 n = len(coords) ends = ends + [n - 4, n - 3, n - 2, n - 1] out = [] start = 0 for end in ends: end += 1 contour = iup_contour(deltas[start:end], coords[start:end]) out.extend(contour) start = end return out # Optimizer @cython.cfunc @cython.inline @cython.locals( i=cython.int, j=cython.int, # tolerance=cython.double, # https://github.com/fonttools/fonttools/issues/3282 x=cython.double, y=cython.double, p=cython.double, q=cython.double, ) @cython.returns(int) def can_iup_in_between( deltas: _DeltaSegment, coords: _PointSegment, i: Integral, j: Integral, tolerance: Real, ): # -> bool: """Return true if the deltas for points at `i` and `j` (`i < j`) can be successfully used to interpolate deltas for points in between them within provided error tolerance.""" assert j - i >= 2 interp = iup_segment(coords[i + 1 : j], coords[i], deltas[i], coords[j], deltas[j]) deltas = deltas[i + 1 : j] return all( abs(complex(x - p, y - q)) <= tolerance for (x, y), (p, q) in zip(deltas, interp) ) @cython.locals( cj=cython.double, dj=cython.double, lcj=cython.double, ldj=cython.double, ncj=cython.double, ndj=cython.double, force=cython.int, forced=set, ) def _iup_contour_bound_forced_set( deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0 ) -> set: """The forced set is a conservative set of points on the contour that must be encoded explicitly (ie. cannot be interpolated). Calculating this set allows for significantly speeding up the dynamic-programming, as well as resolve circularity in DP. 
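    For example, if a point's two neighbors share the same coordinate on
    some axis but carry deltas that differ by more than the tolerance, IUP
    yields zero for the point between them; unless that point's own delta
    is itself within tolerance of zero, it can never be interpolated and
    is therefore forced.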
The set is precise; that is, if an index is in the returned set, then there is no way that IUP can generate delta for that point, given `coords` and `deltas`. """ assert len(deltas) == len(coords) n = len(deltas) forced = set() # Track "last" and "next" points on the contour as we sweep. for i in range(len(deltas) - 1, -1, -1): ld, lc = deltas[i - 1], coords[i - 1] d, c = deltas[i], coords[i] nd, nc = deltas[i - n + 1], coords[i - n + 1] for j in (0, 1): # For X and for Y cj = c[j] dj = d[j] lcj = lc[j] ldj = ld[j] ncj = nc[j] ndj = nd[j] if lcj <= ncj: c1, c2 = lcj, ncj d1, d2 = ldj, ndj else: c1, c2 = ncj, lcj d1, d2 = ndj, ldj force = False # If the two coordinates are the same, then the interpolation # algorithm produces the same delta if both deltas are equal, # and zero if they differ. # # This test has to be before the next one. if c1 == c2: if abs(d1 - d2) > tolerance and abs(dj) > tolerance: force = True # If coordinate for current point is between coordinate of adjacent # points on the two sides, but the delta for current point is NOT # between delta for those adjacent points (considering tolerance # allowance), then there is no way that current point can be IUP-ed. # Mark it forced. elif c1 <= cj <= c2: # and c1 != c2 if not (min(d1, d2) - tolerance <= dj <= max(d1, d2) + tolerance): force = True # Otherwise, the delta should either match the closest, or have the # same sign as the interpolation of the two deltas. else: # cj < c1 or c2 < cj if d1 != d2: if cj < c1: if ( abs(dj) > tolerance and abs(dj - d1) > tolerance and ((dj - tolerance < d1) != (d1 < d2)) ): force = True else: # c2 < cj if ( abs(dj) > tolerance and abs(dj - d2) > tolerance and ((d2 < dj + tolerance) != (d1 < d2)) ): force = True if force: forced.add(i) break return forced @cython.locals( i=cython.int, j=cython.int, best_cost=cython.double, best_j=cython.int, cost=cython.double, forced=set, tolerance=cython.double, ) def _iup_contour_optimize_dp( deltas: _DeltaSegment, coords: _PointSegment, forced=set(), tolerance: Real = 0, lookback: Integral = None, ): """Straightforward Dynamic-Programming. For each index i, find least-costly encoding of points 0 to i where i is explicitly encoded. We find this by considering all previous explicit points j and check whether interpolation can fill points between j and i. Note that solution always encodes last point explicitly. Higher-level is responsible for removing that restriction. As major speedup, we stop looking further whenever we see a "forced" point.""" n = len(deltas) if lookback is None: lookback = n lookback = min(lookback, MAX_LOOKBACK) costs = {-1: 0} chain = {-1: None} for i in range(0, n): best_cost = costs[i - 1] + 1 costs[i] = best_cost chain[i] = i - 1 if i - 1 in forced: continue for j in range(i - 2, max(i - lookback, -2), -1): cost = costs[j] + 1 if cost < best_cost and can_iup_in_between(deltas, coords, j, i, tolerance): costs[i] = best_cost = cost chain[i] = j if j in forced: break return chain, costs def _rot_list(l: list, k: int): """Rotate list by k items forward. Ie. item at position 0 will be at position k in returned list. Negative k is allowed.""" n = len(l) k %= n if not k: return l return l[n - k :] + l[: n - k] def _rot_set(s: set, k: int, n: int): k %= n if not k: return s return {(v + k) % n for v in s} def iup_contour_optimize( deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0.0 ) -> _DeltaOrNoneSegment: """For contour with coordinates `coords`, optimize a set of delta values `deltas` within error `tolerance`. 
Returns delta vector that has most number of None items instead of the input delta. """ n = len(deltas) # Get the easy cases out of the way: # If all are within tolerance distance of 0, encode nothing: if all(abs(complex(*p)) <= tolerance for p in deltas): return [None] * n # If there's exactly one point, return it: if n == 1: return deltas # If all deltas are exactly the same, return just one (the first one): d0 = deltas[0] if all(d0 == d for d in deltas): return [d0] + [None] * (n - 1) # Else, solve the general problem using Dynamic Programming. forced = _iup_contour_bound_forced_set(deltas, coords, tolerance) # The _iup_contour_optimize_dp() routine returns the optimal encoding # solution given the constraint that the last point is always encoded. # To remove this constraint, we use two different methods, depending on # whether forced set is non-empty or not: # Debugging: Make the next if always take the second branch and observe # if the font size changes (reduced); that would mean the forced-set # has members it should not have. if forced: # Forced set is non-empty: rotate the contour start point # such that the last point in the list is a forced point. k = (n - 1) - max(forced) assert k >= 0 deltas = _rot_list(deltas, k) coords = _rot_list(coords, k) forced = _rot_set(forced, k, n) # Debugging: Pass a set() instead of forced variable to the next call # to exercise forced-set computation for under-counting. chain, costs = _iup_contour_optimize_dp(deltas, coords, forced, tolerance) # Assemble solution. solution = set() i = n - 1 while i is not None: solution.add(i) i = chain[i] solution.remove(-1) # if not forced <= solution: # print("coord", coords) # print("deltas", deltas) # print("len", len(deltas)) assert forced <= solution, (forced, solution) deltas = [deltas[i] if i in solution else None for i in range(n)] deltas = _rot_list(deltas, -k) else: # Repeat the contour an extra time, solve the new case, then look for solutions of the # circular n-length problem in the solution for new linear case. I cannot prove that # this always produces the optimal solution... chain, costs = _iup_contour_optimize_dp( deltas + deltas, coords + coords, forced, tolerance, n ) best_sol, best_cost = None, n + 1 for start in range(n - 1, len(costs) - 1): # Assemble solution. solution = set() i = start while i > start - n: solution.add(i % n) i = chain[i] if i == start - n: cost = costs[start] - costs[start - n] if cost <= best_cost: best_sol, best_cost = solution, cost # if not forced <= best_sol: # print("coord", coords) # print("deltas", deltas) # print("len", len(deltas)) assert forced <= best_sol, (forced, best_sol) deltas = [deltas[i] if i in best_sol else None for i in range(n)] return deltas def iup_delta_optimize( deltas: _DeltaSegment, coords: _PointSegment, ends: _Endpoints, tolerance: Real = 0.0, ) -> _DeltaOrNoneSegment: """For the outline given in `coords`, with contour endpoints given in sorted increasing order in `ends`, optimize a set of delta values `deltas` within error `tolerance`. Returns delta vector that has most number of None items instead of the input delta. 
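    Example (illustrative): a single four-point contour plus the four
    phantom points, all shifted by the same (1, 0) delta:

        coords = [(0, 0), (10, 0), (10, 10), (0, 10),
                  (0, 0), (20, 0), (0, 0), (0, -10)]
        deltas = [(1, 0)] * 8
        iup_delta_optimize(deltas, coords, [3])
        # -> [(1, 0), None, None, None, (1, 0), (1, 0), (1, 0), (1, 0)]

    The contour's equal deltas collapse to a single explicit delta, while
    each phantom point forms its own one-point "contour" and stays
    explicit.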
""" assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4 n = len(coords) ends = ends + [n - 4, n - 3, n - 2, n - 1] out = [] start = 0 for end in ends: contour = iup_contour_optimize( deltas[start : end + 1], coords[start : end + 1], tolerance ) assert len(contour) == end - start + 1 out.extend(contour) start = end + 1 return out PKaZZZYW����fontTools/varLib/merger.py""" Merge OpenType Layout tables (GDEF / GPOS / GSUB). """ import os import copy import enum from operator import ior import logging from fontTools.colorLib.builder import MAX_PAINT_COLR_LAYER_COUNT, LayerReuseCache from fontTools.misc import classifyTools from fontTools.misc.roundTools import otRound from fontTools.misc.treeTools import build_n_ary_tree from fontTools.ttLib.tables import otTables as ot from fontTools.ttLib.tables import otBase as otBase from fontTools.ttLib.tables.otConverters import BaseFixedValue from fontTools.ttLib.tables.otTraverse import dfs_base_table from fontTools.ttLib.tables.DefaultTable import DefaultTable from fontTools.varLib import builder, models, varStore from fontTools.varLib.models import nonNone, allNone, allEqual, allEqualTo, subList from fontTools.varLib.varStore import VarStoreInstancer from functools import reduce from fontTools.otlLib.builder import buildSinglePos from fontTools.otlLib.optimize.gpos import ( _compression_level_from_env, compact_pair_pos, ) log = logging.getLogger("fontTools.varLib.merger") from .errors import ( ShouldBeConstant, FoundANone, MismatchedTypes, NotANone, LengthsDiffer, KeysDiffer, InconsistentGlyphOrder, InconsistentExtensions, InconsistentFormats, UnsupportedFormat, VarLibMergeError, ) class Merger(object): def __init__(self, font=None): self.font = font # mergeTables populates this from the parent's master ttfs self.ttfs = None @classmethod def merger(celf, clazzes, attrs=(None,)): assert celf != Merger, "Subclass Merger instead." if "mergers" not in celf.__dict__: celf.mergers = {} if type(clazzes) in (type, enum.EnumMeta): clazzes = (clazzes,) if type(attrs) == str: attrs = (attrs,) def wrapper(method): assert method.__name__ == "merge" done = [] for clazz in clazzes: if clazz in done: continue # Support multiple names of a clazz done.append(clazz) mergers = celf.mergers.setdefault(clazz, {}) for attr in attrs: assert attr not in mergers, ( "Oops, class '%s' has merge function for '%s' defined already." % (clazz.__name__, attr) ) mergers[attr] = method return None return wrapper @classmethod def mergersFor(celf, thing, _default={}): typ = type(thing) for celf in celf.mro(): mergers = getattr(celf, "mergers", None) if mergers is None: break m = celf.mergers.get(typ, None) if m is not None: return m return _default def mergeObjects(self, out, lst, exclude=()): if hasattr(out, "ensureDecompiled"): out.ensureDecompiled(recurse=False) for item in lst: if hasattr(item, "ensureDecompiled"): item.ensureDecompiled(recurse=False) keys = sorted(vars(out).keys()) if not all(keys == sorted(vars(v).keys()) for v in lst): raise KeysDiffer( self, expected=keys, got=[sorted(vars(v).keys()) for v in lst] ) mergers = self.mergersFor(out) defaultMerger = mergers.get("*", self.__class__.mergeThings) try: for key in keys: if key in exclude: continue value = getattr(out, key) values = [getattr(table, key) for table in lst] mergerFunc = mergers.get(key, defaultMerger) mergerFunc(self, value, values) except VarLibMergeError as e: e.stack.append("." 
+ key) raise def mergeLists(self, out, lst): if not allEqualTo(out, lst, len): raise LengthsDiffer(self, expected=len(out), got=[len(x) for x in lst]) for i, (value, values) in enumerate(zip(out, zip(*lst))): try: self.mergeThings(value, values) except VarLibMergeError as e: e.stack.append("[%d]" % i) raise def mergeThings(self, out, lst): if not allEqualTo(out, lst, type): raise MismatchedTypes( self, expected=type(out).__name__, got=[type(x).__name__ for x in lst] ) mergerFunc = self.mergersFor(out).get(None, None) if mergerFunc is not None: mergerFunc(self, out, lst) elif isinstance(out, enum.Enum): # need to special-case Enums as have __dict__ but are not regular 'objects', # otherwise mergeObjects/mergeThings get trapped in a RecursionError if not allEqualTo(out, lst): raise ShouldBeConstant(self, expected=out, got=lst) elif hasattr(out, "__dict__"): self.mergeObjects(out, lst) elif isinstance(out, list): self.mergeLists(out, lst) else: if not allEqualTo(out, lst): raise ShouldBeConstant(self, expected=out, got=lst) def mergeTables(self, font, master_ttfs, tableTags): for tag in tableTags: if tag not in font: continue try: self.ttfs = master_ttfs self.mergeThings(font[tag], [m.get(tag) for m in master_ttfs]) except VarLibMergeError as e: e.stack.append(tag) raise # # Aligning merger # class AligningMerger(Merger): pass @AligningMerger.merger(ot.GDEF, "GlyphClassDef") def merge(merger, self, lst): if self is None: if not allNone(lst): raise NotANone(merger, expected=None, got=lst) return lst = [l.classDefs for l in lst] self.classDefs = {} # We only care about the .classDefs self = self.classDefs allKeys = set() allKeys.update(*[l.keys() for l in lst]) for k in allKeys: allValues = nonNone(l.get(k) for l in lst) if not allEqual(allValues): raise ShouldBeConstant( merger, expected=allValues[0], got=lst, stack=["." + k] ) if not allValues: self[k] = None else: self[k] = allValues[0] def _SinglePosUpgradeToFormat2(self): if self.Format == 2: return self ret = ot.SinglePos() ret.Format = 2 ret.Coverage = self.Coverage ret.ValueFormat = self.ValueFormat ret.Value = [self.Value for _ in ret.Coverage.glyphs] ret.ValueCount = len(ret.Value) return ret def _merge_GlyphOrders(font, lst, values_lst=None, default=None): """Takes font and list of glyph lists (must be sorted by glyph id), and returns two things: - Combined glyph list, - If values_lst is None, return input glyph lists, but padded with None when a glyph was missing in a list. Otherwise, return values_lst list-of-list, padded with None to match combined glyph lists. """ if values_lst is None: dict_sets = [set(l) for l in lst] else: dict_sets = [{g: v for g, v in zip(l, vs)} for l, vs in zip(lst, values_lst)] combined = set() combined.update(*dict_sets) sortKey = font.getReverseGlyphMap().__getitem__ order = sorted(combined, key=sortKey) # Make sure all input glyphsets were in proper order if not all(sorted(vs, key=sortKey) == vs for vs in lst): raise InconsistentGlyphOrder() del combined paddedValues = None if values_lst is None: padded = [ [glyph if glyph in dict_set else default for glyph in order] for dict_set in dict_sets ] else: assert len(lst) == len(values_lst) padded = [ [dict_set[glyph] if glyph in dict_set else default for glyph in order] for dict_set in dict_sets ] return order, padded @AligningMerger.merger(otBase.ValueRecord) def merge(merger, self, lst): # Code below sometimes calls us with self being # a new object. Copy it from lst and recurse. 
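    # (Illustrative aside on dispatch: the decorator above stores this
    # function in AligningMerger.mergers[otBase.ValueRecord][None];
    # mergersFor() then looks type(thing) up in those registries along the
    # class's MRO, and mergeThings() calls the attr=None entry, so every
    # value record encountered during a merge funnels through this one
    # function.)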
self.__dict__ = lst[0].__dict__.copy() merger.mergeObjects(self, lst) @AligningMerger.merger(ot.Anchor) def merge(merger, self, lst): # Code below sometimes calls us with self being # a new object. Copy it from lst and recurse. self.__dict__ = lst[0].__dict__.copy() merger.mergeObjects(self, lst) def _Lookup_SinglePos_get_effective_value(merger, subtables, glyph): for self in subtables: if ( self is None or type(self) != ot.SinglePos or self.Coverage is None or glyph not in self.Coverage.glyphs ): continue if self.Format == 1: return self.Value elif self.Format == 2: return self.Value[self.Coverage.glyphs.index(glyph)] else: raise UnsupportedFormat(merger, subtable="single positioning lookup") return None def _Lookup_PairPos_get_effective_value_pair( merger, subtables, firstGlyph, secondGlyph ): for self in subtables: if ( self is None or type(self) != ot.PairPos or self.Coverage is None or firstGlyph not in self.Coverage.glyphs ): continue if self.Format == 1: ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)] pvr = ps.PairValueRecord for rec in pvr: # TODO Speed up if rec.SecondGlyph == secondGlyph: return rec continue elif self.Format == 2: klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0) klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0) return self.Class1Record[klass1].Class2Record[klass2] else: raise UnsupportedFormat(merger, subtable="pair positioning lookup") return None @AligningMerger.merger(ot.SinglePos) def merge(merger, self, lst): self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0) if not (len(lst) == 1 or (valueFormat & ~0xF == 0)): raise UnsupportedFormat(merger, subtable="single positioning lookup") # If all have same coverage table and all are format 1, coverageGlyphs = self.Coverage.glyphs if all(v.Format == 1 for v in lst) and all( coverageGlyphs == v.Coverage.glyphs for v in lst ): self.Value = otBase.ValueRecord(valueFormat, self.Value) if valueFormat != 0: # If v.Value is None, it means a kerning of 0; we want # it to participate in the model still. # https://github.com/fonttools/fonttools/issues/3111 merger.mergeThings( self.Value, [v.Value if v.Value is not None else otBase.ValueRecord() for v in lst], ) self.ValueFormat = self.Value.getFormat() return # Upgrade everything to Format=2 self.Format = 2 lst = [_SinglePosUpgradeToFormat2(v) for v in lst] # Align them glyphs, padded = _merge_GlyphOrders( merger.font, [v.Coverage.glyphs for v in lst], [v.Value for v in lst] ) self.Coverage.glyphs = glyphs self.Value = [otBase.ValueRecord(valueFormat) for _ in glyphs] self.ValueCount = len(self.Value) for i, values in enumerate(padded): for j, glyph in enumerate(glyphs): if values[j] is not None: continue # Fill in value from other subtables # Note!!! This *might* result in behavior change if ValueFormat2-zeroedness # is different between used subtable and current subtable! # TODO(behdad) Check and warn if that happens? v = _Lookup_SinglePos_get_effective_value( merger, merger.lookup_subtables[i], glyph ) if v is None: v = otBase.ValueRecord(valueFormat) values[j] = v merger.mergeLists(self.Value, padded) # Merge everything else; though, there shouldn't be anything else. 
:) merger.mergeObjects( self, lst, exclude=("Format", "Coverage", "Value", "ValueCount", "ValueFormat") ) self.ValueFormat = reduce( int.__or__, [v.getEffectiveFormat() for v in self.Value], 0 ) @AligningMerger.merger(ot.PairSet) def merge(merger, self, lst): # Align them glyphs, padded = _merge_GlyphOrders( merger.font, [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst], [vs.PairValueRecord for vs in lst], ) self.PairValueRecord = pvrs = [] for glyph in glyphs: pvr = ot.PairValueRecord() pvr.SecondGlyph = glyph pvr.Value1 = ( otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None ) pvr.Value2 = ( otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None ) pvrs.append(pvr) self.PairValueCount = len(self.PairValueRecord) for i, values in enumerate(padded): for j, glyph in enumerate(glyphs): # Fill in value from other subtables v = ot.PairValueRecord() v.SecondGlyph = glyph if values[j] is not None: vpair = values[j] else: vpair = _Lookup_PairPos_get_effective_value_pair( merger, merger.lookup_subtables[i], self._firstGlyph, glyph ) if vpair is None: v1, v2 = None, None else: v1 = getattr(vpair, "Value1", None) v2 = getattr(vpair, "Value2", None) v.Value1 = ( otBase.ValueRecord(merger.valueFormat1, src=v1) if merger.valueFormat1 else None ) v.Value2 = ( otBase.ValueRecord(merger.valueFormat2, src=v2) if merger.valueFormat2 else None ) values[j] = v del self._firstGlyph merger.mergeLists(self.PairValueRecord, padded) def _PairPosFormat1_merge(self, lst, merger): assert allEqual( [l.ValueFormat2 == 0 for l in lst if l.PairSet] ), "Report bug against fonttools." # Merge everything else; makes sure Format is the same. merger.mergeObjects( self, lst, exclude=("Coverage", "PairSet", "PairSetCount", "ValueFormat1", "ValueFormat2"), ) empty = ot.PairSet() empty.PairValueRecord = [] empty.PairValueCount = 0 # Align them glyphs, padded = _merge_GlyphOrders( merger.font, [v.Coverage.glyphs for v in lst], [v.PairSet for v in lst], default=empty, ) self.Coverage.glyphs = glyphs self.PairSet = [ot.PairSet() for _ in glyphs] self.PairSetCount = len(self.PairSet) for glyph, ps in zip(glyphs, self.PairSet): ps._firstGlyph = glyph merger.mergeLists(self.PairSet, padded) def _ClassDef_invert(self, allGlyphs=None): if isinstance(self, dict): classDefs = self else: classDefs = self.classDefs if self and self.classDefs else {} m = max(classDefs.values()) if classDefs else 0 ret = [] for _ in range(m + 1): ret.append(set()) for k, v in classDefs.items(): ret[v].add(k) # Class-0 is special. It's "everything else". if allGlyphs is None: ret[0] = None else: # Limit all classes to glyphs in allGlyphs. # Collect anything without a non-zero class into class=zero. 
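# --- Illustrative aside (added sketch, not from the fontTools sources): the
# inversion _ClassDef_invert performs, shown on plain data. classDefs maps
# glyph -> class; the result holds one glyph set per class, with ret[0]
# reserved for "everything else" (here limited to a hypothetical allGlyphs).
def _demo_classdef_invert():
    classDefs = {"A": 1, "Aacute": 1, "V": 2}
    ret = [set() for _ in range(max(classDefs.values()) + 1)]
    for glyph, klass in classDefs.items():
        ret[klass].add(glyph)
    allGlyphs = {"A", "Aacute", "V", "W"}
    ret[0] = allGlyphs - set(classDefs)  # glyphs with no non-zero class
    assert ret == [{"W"}, {"A", "Aacute"}, {"V"}]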
ret[0] = class0 = set(allGlyphs) for s in ret[1:]: s.intersection_update(class0) class0.difference_update(s) return ret def _ClassDef_merge_classify(lst, allGlyphses=None): self = ot.ClassDef() self.classDefs = classDefs = {} allGlyphsesWasNone = allGlyphses is None if allGlyphsesWasNone: allGlyphses = [None] * len(lst) classifier = classifyTools.Classifier() for classDef, allGlyphs in zip(lst, allGlyphses): sets = _ClassDef_invert(classDef, allGlyphs) if allGlyphs is None: sets = sets[1:] classifier.update(sets) classes = classifier.getClasses() if allGlyphsesWasNone: classes.insert(0, set()) for i, classSet in enumerate(classes): if i == 0: continue for g in classSet: classDefs[g] = i return self, classes def _PairPosFormat2_align_matrices(self, lst, font, transparent=False): matrices = [l.Class1Record for l in lst] # Align first classes self.ClassDef1, classes = _ClassDef_merge_classify( [l.ClassDef1 for l in lst], [l.Coverage.glyphs for l in lst] ) self.Class1Count = len(classes) new_matrices = [] for l, matrix in zip(lst, matrices): nullRow = None coverage = set(l.Coverage.glyphs) classDef1 = l.ClassDef1.classDefs class1Records = [] for classSet in classes: exemplarGlyph = next(iter(classSet)) if exemplarGlyph not in coverage: # Follow-up to e6125b353e1f54a0280ded5434b8e40d042de69f, # Fixes https://github.com/googlei18n/fontmake/issues/470 # Again, revert 8d441779e5afc664960d848f62c7acdbfc71d7b9 # when merger becomes selfless. nullRow = None if nullRow is None: nullRow = ot.Class1Record() class2records = nullRow.Class2Record = [] # TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f for _ in range(l.Class2Count): if transparent: rec2 = None else: rec2 = ot.Class2Record() rec2.Value1 = ( otBase.ValueRecord(self.ValueFormat1) if self.ValueFormat1 else None ) rec2.Value2 = ( otBase.ValueRecord(self.ValueFormat2) if self.ValueFormat2 else None ) class2records.append(rec2) rec1 = nullRow else: klass = classDef1.get(exemplarGlyph, 0) rec1 = matrix[klass] # TODO handle out-of-range? class1Records.append(rec1) new_matrices.append(class1Records) matrices = new_matrices del new_matrices # Align second classes self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst]) self.Class2Count = len(classes) new_matrices = [] for l, matrix in zip(lst, matrices): classDef2 = l.ClassDef2.classDefs class1Records = [] for rec1old in matrix: oldClass2Records = rec1old.Class2Record rec1new = ot.Class1Record() class2Records = rec1new.Class2Record = [] for classSet in classes: if not classSet: # class=0 rec2 = oldClass2Records[0] else: exemplarGlyph = next(iter(classSet)) klass = classDef2.get(exemplarGlyph, 0) rec2 = oldClass2Records[klass] class2Records.append(copy.deepcopy(rec2)) class1Records.append(rec1new) new_matrices.append(class1Records) matrices = new_matrices del new_matrices return matrices def _PairPosFormat2_merge(self, lst, merger): assert allEqual( [l.ValueFormat2 == 0 for l in lst if l.Class1Record] ), "Report bug against fonttools." merger.mergeObjects( self, lst, exclude=( "Coverage", "ClassDef1", "Class1Count", "ClassDef2", "Class2Count", "Class1Record", "ValueFormat1", "ValueFormat2", ), ) # Align coverages glyphs, _ = _merge_GlyphOrders(merger.font, [v.Coverage.glyphs for v in lst]) self.Coverage.glyphs = glyphs # Currently, if the coverage of PairPosFormat2 subtables are different, # we do NOT bother walking down the subtable list when filling in new # rows for alignment. 
As such, this is only correct if current subtable # is the last subtable in the lookup. Ensure that. # # Note that our canonicalization process merges trailing PairPosFormat2's, # so in reality this is rare. for l, subtables in zip(lst, merger.lookup_subtables): if l.Coverage.glyphs != glyphs: assert l == subtables[-1] matrices = _PairPosFormat2_align_matrices(self, lst, merger.font) self.Class1Record = list(matrices[0]) # TODO move merger to be selfless merger.mergeLists(self.Class1Record, matrices) @AligningMerger.merger(ot.PairPos) def merge(merger, self, lst): merger.valueFormat1 = self.ValueFormat1 = reduce( int.__or__, [l.ValueFormat1 for l in lst], 0 ) merger.valueFormat2 = self.ValueFormat2 = reduce( int.__or__, [l.ValueFormat2 for l in lst], 0 ) if self.Format == 1: _PairPosFormat1_merge(self, lst, merger) elif self.Format == 2: _PairPosFormat2_merge(self, lst, merger) else: raise UnsupportedFormat(merger, subtable="pair positioning lookup") del merger.valueFormat1, merger.valueFormat2 # Now examine the list of value records, and update to the union of format values, # as merge might have created new values. vf1 = 0 vf2 = 0 if self.Format == 1: for pairSet in self.PairSet: for pairValueRecord in pairSet.PairValueRecord: pv1 = getattr(pairValueRecord, "Value1", None) if pv1 is not None: vf1 |= pv1.getFormat() pv2 = getattr(pairValueRecord, "Value2", None) if pv2 is not None: vf2 |= pv2.getFormat() elif self.Format == 2: for class1Record in self.Class1Record: for class2Record in class1Record.Class2Record: pv1 = getattr(class2Record, "Value1", None) if pv1 is not None: vf1 |= pv1.getFormat() pv2 = getattr(class2Record, "Value2", None) if pv2 is not None: vf2 |= pv2.getFormat() self.ValueFormat1 = vf1 self.ValueFormat2 = vf2 def _MarkBasePosFormat1_merge(self, lst, merger, Mark="Mark", Base="Base"): self.ClassCount = max(l.ClassCount for l in lst) MarkCoverageGlyphs, MarkRecords = _merge_GlyphOrders( merger.font, [getattr(l, Mark + "Coverage").glyphs for l in lst], [getattr(l, Mark + "Array").MarkRecord for l in lst], ) getattr(self, Mark + "Coverage").glyphs = MarkCoverageGlyphs BaseCoverageGlyphs, BaseRecords = _merge_GlyphOrders( merger.font, [getattr(l, Base + "Coverage").glyphs for l in lst], [getattr(getattr(l, Base + "Array"), Base + "Record") for l in lst], ) getattr(self, Base + "Coverage").glyphs = BaseCoverageGlyphs # MarkArray records = [] for g, glyphRecords in zip(MarkCoverageGlyphs, zip(*MarkRecords)): allClasses = [r.Class for r in glyphRecords if r is not None] # TODO Right now we require that all marks have same class in # all masters that cover them. This is not required. # # We can relax that by just requiring that all marks that have # the same class in a master, have the same class in every other # master. Indeed, if, say, a sparse master only covers one mark, # that mark probably will get class 0, which would possibly be # different from its class in other masters. # # We can even go further and reclassify marks to support any # input. But, since, it's unlikely that two marks being both, # say, "top" in one master, and one being "top" and other being # "top-right" in another master, we shouldn't do that, as any # failures in that case will probably signify mistakes in the # input masters. 
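# --- Illustrative aside (added sketch, not from the fontTools sources): the
# constraint the check below enforces. Every master that covers a mark must
# currently assign it the same attachment class; a mismatch is reported rather
# than reclassified.
def _demo_mark_class_consistency():
    consistent, mismatched = [1, 1, 1], [1, 1, 2]  # per-master classes of one mark
    assert len(set(consistent)) == 1   # merges fine
    assert len(set(mismatched)) != 1   # would raise ShouldBeConstant below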
if not allEqual(allClasses): raise ShouldBeConstant(merger, expected=allClasses[0], got=allClasses) else: rec = ot.MarkRecord() rec.Class = allClasses[0] allAnchors = [None if r is None else r.MarkAnchor for r in glyphRecords] if allNone(allAnchors): anchor = None else: anchor = ot.Anchor() anchor.Format = 1 merger.mergeThings(anchor, allAnchors) rec.MarkAnchor = anchor records.append(rec) array = ot.MarkArray() array.MarkRecord = records array.MarkCount = len(records) setattr(self, Mark + "Array", array) # BaseArray records = [] for g, glyphRecords in zip(BaseCoverageGlyphs, zip(*BaseRecords)): if allNone(glyphRecords): rec = None else: rec = getattr(ot, Base + "Record")() anchors = [] setattr(rec, Base + "Anchor", anchors) glyphAnchors = [ [] if r is None else getattr(r, Base + "Anchor") for r in glyphRecords ] for l in glyphAnchors: l.extend([None] * (self.ClassCount - len(l))) for allAnchors in zip(*glyphAnchors): if allNone(allAnchors): anchor = None else: anchor = ot.Anchor() anchor.Format = 1 merger.mergeThings(anchor, allAnchors) anchors.append(anchor) records.append(rec) array = getattr(ot, Base + "Array")() setattr(array, Base + "Record", records) setattr(array, Base + "Count", len(records)) setattr(self, Base + "Array", array) @AligningMerger.merger(ot.MarkBasePos) def merge(merger, self, lst): if not allEqualTo(self.Format, (l.Format for l in lst)): raise InconsistentFormats( merger, subtable="mark-to-base positioning lookup", expected=self.Format, got=[l.Format for l in lst], ) if self.Format == 1: _MarkBasePosFormat1_merge(self, lst, merger) else: raise UnsupportedFormat(merger, subtable="mark-to-base positioning lookup") @AligningMerger.merger(ot.MarkMarkPos) def merge(merger, self, lst): if not allEqualTo(self.Format, (l.Format for l in lst)): raise InconsistentFormats( merger, subtable="mark-to-mark positioning lookup", expected=self.Format, got=[l.Format for l in lst], ) if self.Format == 1: _MarkBasePosFormat1_merge(self, lst, merger, "Mark1", "Mark2") else: raise UnsupportedFormat(merger, subtable="mark-to-mark positioning lookup") def _PairSet_flatten(lst, font): self = ot.PairSet() self.Coverage = ot.Coverage() # Align them glyphs, padded = _merge_GlyphOrders( font, [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst], [vs.PairValueRecord for vs in lst], ) self.Coverage.glyphs = glyphs self.PairValueRecord = pvrs = [] for values in zip(*padded): for v in values: if v is not None: pvrs.append(v) break else: assert False self.PairValueCount = len(self.PairValueRecord) return self def _Lookup_PairPosFormat1_subtables_flatten(lst, font): assert allEqual( [l.ValueFormat2 == 0 for l in lst if l.PairSet] ), "Report bug against fonttools." self = ot.PairPos() self.Format = 1 self.Coverage = ot.Coverage() self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) # Align them glyphs, padded = _merge_GlyphOrders( font, [v.Coverage.glyphs for v in lst], [v.PairSet for v in lst] ) self.Coverage.glyphs = glyphs self.PairSet = [ _PairSet_flatten([v for v in values if v is not None], font) for values in zip(*padded) ] self.PairSetCount = len(self.PairSet) return self def _Lookup_PairPosFormat2_subtables_flatten(lst, font): assert allEqual( [l.ValueFormat2 == 0 for l in lst if l.Class1Record] ), "Report bug against fonttools." 
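# --- Illustrative aside (added sketch, not from the fontTools sources): why
# the flatteners here OR the masters' masks together. An OpenType ValueFormat
# is a bit mask, so the format that can carry whatever any master needs is the
# bitwise OR of all of them, which is what reduce(int.__or__, ...) computes.
def _demo_valueformat_union():
    from functools import reduce

    X_PLACEMENT, Y_PLACEMENT, X_ADVANCE = 0x0001, 0x0002, 0x0004  # spec bit values
    master_formats = [X_PLACEMENT, X_ADVANCE, X_PLACEMENT | Y_PLACEMENT]
    assert reduce(int.__or__, master_formats, 0) == 0x0007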
self = ot.PairPos() self.Format = 2 self.Coverage = ot.Coverage() self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) # Align them glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst]) self.Coverage.glyphs = glyphs matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True) matrix = self.Class1Record = [] for rows in zip(*matrices): row = ot.Class1Record() matrix.append(row) row.Class2Record = [] row = row.Class2Record for cols in zip(*list(r.Class2Record for r in rows)): col = next(iter(c for c in cols if c is not None)) row.append(col) return self def _Lookup_PairPos_subtables_canonicalize(lst, font): """Merge multiple Format1 subtables at the beginning of lst, and merge multiple consecutive Format2 subtables that have the same Class2 (ie. were split because of offset overflows). Returns new list.""" lst = list(lst) l = len(lst) i = 0 while i < l and lst[i].Format == 1: i += 1 lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)] l = len(lst) i = l while i > 0 and lst[i - 1].Format == 2: i -= 1 lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)] return lst def _Lookup_SinglePos_subtables_flatten(lst, font, min_inclusive_rec_format): glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst], None) num_glyphs = len(glyphs) new = ot.SinglePos() new.Format = 2 new.ValueFormat = min_inclusive_rec_format new.Coverage = ot.Coverage() new.Coverage.glyphs = glyphs new.ValueCount = num_glyphs new.Value = [None] * num_glyphs for singlePos in lst: if singlePos.Format == 1: val_rec = singlePos.Value for gname in singlePos.Coverage.glyphs: i = glyphs.index(gname) new.Value[i] = copy.deepcopy(val_rec) elif singlePos.Format == 2: for j, gname in enumerate(singlePos.Coverage.glyphs): val_rec = singlePos.Value[j] i = glyphs.index(gname) new.Value[i] = copy.deepcopy(val_rec) return [new] @AligningMerger.merger(ot.CursivePos) def merge(merger, self, lst): # Align them glyphs, padded = _merge_GlyphOrders( merger.font, [l.Coverage.glyphs for l in lst], [l.EntryExitRecord for l in lst], ) self.Format = 1 self.Coverage = ot.Coverage() self.Coverage.glyphs = glyphs self.EntryExitRecord = [] for _ in glyphs: rec = ot.EntryExitRecord() rec.EntryAnchor = ot.Anchor() rec.EntryAnchor.Format = 1 rec.ExitAnchor = ot.Anchor() rec.ExitAnchor.Format = 1 self.EntryExitRecord.append(rec) merger.mergeLists(self.EntryExitRecord, padded) self.EntryExitCount = len(self.EntryExitRecord) @AligningMerger.merger(ot.EntryExitRecord) def merge(merger, self, lst): if all(master.EntryAnchor is None for master in lst): self.EntryAnchor = None if all(master.ExitAnchor is None for master in lst): self.ExitAnchor = None merger.mergeObjects(self, lst) @AligningMerger.merger(ot.Lookup) def merge(merger, self, lst): subtables = merger.lookup_subtables = [l.SubTable for l in lst] # Remove Extension subtables for l, sts in list(zip(lst, subtables)) + [(self, self.SubTable)]: if not sts: continue if sts[0].__class__.__name__.startswith("Extension"): if not allEqual([st.__class__ for st in sts]): raise InconsistentExtensions( merger, expected="Extension", got=[st.__class__.__name__ for st in sts], ) if not allEqual([st.ExtensionLookupType for st in sts]): raise InconsistentExtensions(merger) l.LookupType = sts[0].ExtensionLookupType new_sts = [st.ExtSubTable for st in sts] del sts[:] sts.extend(new_sts) isPairPos = self.SubTable and isinstance(self.SubTable[0], ot.PairPos) if 
isPairPos: # AFDKO and feaLib sometimes generate two Format1 subtables instead of one. # Merge those before continuing. # https://github.com/fonttools/fonttools/issues/719 self.SubTable = _Lookup_PairPos_subtables_canonicalize( self.SubTable, merger.font ) subtables = merger.lookup_subtables = [ _Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables ] else: isSinglePos = self.SubTable and isinstance(self.SubTable[0], ot.SinglePos) if isSinglePos: numSubtables = [len(st) for st in subtables] if not all([nums == numSubtables[0] for nums in numSubtables]): # Flatten list of SinglePos subtables to single Format 2 subtable, # with all value records set to the rec format type. # We use buildSinglePos() to optimize the lookup after merging. valueFormatList = [t.ValueFormat for st in subtables for t in st] # Find the minimum value record that can accommodate all the singlePos subtables. mirf = reduce(ior, valueFormatList) self.SubTable = _Lookup_SinglePos_subtables_flatten( self.SubTable, merger.font, mirf ) subtables = merger.lookup_subtables = [ _Lookup_SinglePos_subtables_flatten(st, merger.font, mirf) for st in subtables ] flattened = True else: flattened = False merger.mergeLists(self.SubTable, subtables) self.SubTableCount = len(self.SubTable) if isPairPos: # If format-1 subtable created during canonicalization is empty, remove it. assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1 if not self.SubTable[0].Coverage.glyphs: self.SubTable.pop(0) self.SubTableCount -= 1 # If format-2 subtable created during canonicalization is empty, remove it. assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2 if not self.SubTable[-1].Coverage.glyphs: self.SubTable.pop(-1) self.SubTableCount -= 1 # Compact the merged subtables # This is a good moment to do it because the compaction should create # smaller subtables, which may prevent overflows from happening. # Keep reading the value from the ENV until ufo2ft switches to the config system level = merger.font.cfg.get( "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL", default=_compression_level_from_env(), ) if level != 0: log.info("Compacting GPOS...") self.SubTable = compact_pair_pos(merger.font, level, self.SubTable) self.SubTableCount = len(self.SubTable) elif isSinglePos and flattened: singlePosTable = self.SubTable[0] glyphs = singlePosTable.Coverage.glyphs # We know that singlePosTable is Format 2, as this is set
singlePosMapping = { gname: valRecord for gname, valRecord in zip(glyphs, singlePosTable.Value) } self.SubTable = buildSinglePos( singlePosMapping, merger.font.getReverseGlyphMap() ) merger.mergeObjects(self, lst, exclude=["SubTable", "SubTableCount"]) del merger.lookup_subtables # # InstancerMerger # class InstancerMerger(AligningMerger): """A merger that takes multiple master fonts, and instantiates an instance.""" def __init__(self, font, model, location): Merger.__init__(self, font) self.model = model self.location = location self.masterScalars = model.getMasterScalars(location) @InstancerMerger.merger(ot.CaretValue) def merge(merger, self, lst): assert self.Format == 1 Coords = [a.Coordinate for a in lst] model = merger.model masterScalars = merger.masterScalars self.Coordinate = otRound( model.interpolateFromValuesAndScalars(Coords, masterScalars) ) @InstancerMerger.merger(ot.Anchor) def merge(merger, self, lst): assert self.Format == 1 XCoords = [a.XCoordinate for a in lst] YCoords = [a.YCoordinate for a in lst] model = merger.model masterScalars = merger.masterScalars self.XCoordinate = otRound( model.interpolateFromValuesAndScalars(XCoords, masterScalars) ) self.YCoordinate = otRound( model.interpolateFromValuesAndScalars(YCoords, masterScalars) ) @InstancerMerger.merger(otBase.ValueRecord) def merge(merger, self, lst): model = merger.model masterScalars = merger.masterScalars # TODO Handle differing valueformats for name, tableName in [ ("XAdvance", "XAdvDevice"), ("YAdvance", "YAdvDevice"), ("XPlacement", "XPlaDevice"), ("YPlacement", "YPlaDevice"), ]: assert not hasattr(self, tableName) if hasattr(self, name): values = [getattr(a, name, 0) for a in lst] value = otRound( model.interpolateFromValuesAndScalars(values, masterScalars) ) setattr(self, name, value) # # MutatorMerger # class MutatorMerger(AligningMerger): """A merger that takes a variable font, and instantiates an instance. While there's no "merging" to be done per se, the operation can benefit from many operations that the aligning merger does.""" def __init__(self, font, instancer, deleteVariations=True): Merger.__init__(self, font) self.instancer = instancer self.deleteVariations = deleteVariations @MutatorMerger.merger(ot.CaretValue) def merge(merger, self, lst): # Hack till we become selfless. self.__dict__ = lst[0].__dict__.copy() if self.Format != 3: return instancer = merger.instancer dev = self.DeviceTable if merger.deleteVariations: del self.DeviceTable if dev: assert dev.DeltaFormat == 0x8000 varidx = (dev.StartSize << 16) + dev.EndSize delta = otRound(instancer[varidx]) self.Coordinate += delta if merger.deleteVariations: self.Format = 1 @MutatorMerger.merger(ot.Anchor) def merge(merger, self, lst): # Hack till we become selfless. self.__dict__ = lst[0].__dict__.copy() if self.Format != 3: return instancer = merger.instancer for v in "XY": tableName = v + "DeviceTable" if not hasattr(self, tableName): continue dev = getattr(self, tableName) if merger.deleteVariations: delattr(self, tableName) if dev is None: continue assert dev.DeltaFormat == 0x8000 varidx = (dev.StartSize << 16) + dev.EndSize delta = otRound(instancer[varidx]) attr = v + "Coordinate" setattr(self, attr, getattr(self, attr) + delta) if merger.deleteVariations: self.Format = 1 @MutatorMerger.merger(otBase.ValueRecord) def merge(merger, self, lst): # Hack till we become selfless. 
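# --- Illustrative aside (added sketch, not from the fontTools sources): the
# index arithmetic used by the MutatorMerger methods around here. A Device
# table with DeltaFormat 0x8000 is really a VariationIndex record, with the
# outer/inner delta-set indices carried in the StartSize/EndSize fields; the
# VarStore lookup key packs both into one 32-bit value.
def _demo_variation_index():
    outer, inner = 2, 513           # DeltaSetOuterIndex, DeltaSetInnerIndex
    varidx = (outer << 16) + inner  # same arithmetic as (StartSize << 16) + EndSize
    assert varidx == 0x00020201
    assert (varidx >> 16, varidx & 0xFFFF) == (outer, inner)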
self.__dict__ = lst[0].__dict__.copy() instancer = merger.instancer for name, tableName in [ ("XAdvance", "XAdvDevice"), ("YAdvance", "YAdvDevice"), ("XPlacement", "XPlaDevice"), ("YPlacement", "YPlaDevice"), ]: if not hasattr(self, tableName): continue dev = getattr(self, tableName) if merger.deleteVariations: delattr(self, tableName) if dev is None: continue assert dev.DeltaFormat == 0x8000 varidx = (dev.StartSize << 16) + dev.EndSize delta = otRound(instancer[varidx]) setattr(self, name, getattr(self, name, 0) + delta) # # VariationMerger # class VariationMerger(AligningMerger): """A merger that takes multiple master fonts, and builds a variable font.""" def __init__(self, model, axisTags, font): Merger.__init__(self, font) self.store_builder = varStore.OnlineVarStoreBuilder(axisTags) self.setModel(model) def setModel(self, model): self.model = model self.store_builder.setModel(model) def mergeThings(self, out, lst): masterModel = None origTTFs = None if None in lst: if allNone(lst): if out is not None: raise FoundANone(self, got=lst) return # temporarily subset the list of master ttfs to the ones for which # master values are not None origTTFs = self.ttfs if self.ttfs: self.ttfs = subList([v is not None for v in lst], self.ttfs) masterModel = self.model model, lst = masterModel.getSubModel(lst) self.setModel(model) super(VariationMerger, self).mergeThings(out, lst) if masterModel: self.setModel(masterModel) if origTTFs: self.ttfs = origTTFs def buildVarDevTable(store_builder, master_values): if allEqual(master_values): return master_values[0], None base, varIdx = store_builder.storeMasters(master_values) return base, builder.buildVarDevTable(varIdx) @VariationMerger.merger(ot.BaseCoord) def merge(merger, self, lst): if self.Format != 1: raise UnsupportedFormat(merger, subtable="a baseline coordinate") self.Coordinate, DeviceTable = buildVarDevTable( merger.store_builder, [a.Coordinate for a in lst] ) if DeviceTable: self.Format = 3 self.DeviceTable = DeviceTable @VariationMerger.merger(ot.CaretValue) def merge(merger, self, lst): if self.Format != 1: raise UnsupportedFormat(merger, subtable="a caret") self.Coordinate, DeviceTable = buildVarDevTable( merger.store_builder, [a.Coordinate for a in lst] ) if DeviceTable: self.Format = 3 self.DeviceTable = DeviceTable @VariationMerger.merger(ot.Anchor) def merge(merger, self, lst): if self.Format != 1: raise UnsupportedFormat(merger, subtable="an anchor") self.XCoordinate, XDeviceTable = buildVarDevTable( merger.store_builder, [a.XCoordinate for a in lst] ) self.YCoordinate, YDeviceTable = buildVarDevTable( merger.store_builder, [a.YCoordinate for a in lst] ) if XDeviceTable or YDeviceTable: self.Format = 3 self.XDeviceTable = XDeviceTable self.YDeviceTable = YDeviceTable @VariationMerger.merger(otBase.ValueRecord) def merge(merger, self, lst): for name, tableName in [ ("XAdvance", "XAdvDevice"), ("YAdvance", "YAdvDevice"), ("XPlacement", "XPlaDevice"), ("YPlacement", "YPlaDevice"), ]: if hasattr(self, name): value, deviceTable = buildVarDevTable( merger.store_builder, [getattr(a, name, 0) for a in lst] ) setattr(self, name, value) if deviceTable: setattr(self, tableName, deviceTable) class COLRVariationMerger(VariationMerger): """A specialized VariationMerger that takes multiple master fonts containing COLRv1 tables, and builds a variable COLR font. COLR tables are special in that variable subtables can be associated with multiple delta-set indices (via VarIndexBase). 
They also contain tables that must change their type (not simply the Format) as they become variable (e.g. Affine2x3 -> VarAffine2x3) so this merger takes care of that too. """ def __init__(self, model, axisTags, font, allowLayerReuse=True): VariationMerger.__init__(self, model, axisTags, font) # maps {tuple(varIdxes): VarIndexBase} to facilitate reuse of VarIndexBase # between variable tables with same varIdxes. self.varIndexCache = {} # flat list of all the varIdxes generated while merging self.varIdxes = [] # set of id()s of the subtables that contain variations after merging # and need to be upgraded to the associated VarType. self.varTableIds = set() # we keep these around for rebuilding a LayerList while merging PaintColrLayers self.layers = [] self.layerReuseCache = None if allowLayerReuse: self.layerReuseCache = LayerReuseCache() # flag to ensure BaseGlyphList is fully merged before LayerList gets processed self._doneBaseGlyphs = False def mergeTables(self, font, master_ttfs, tableTags=("COLR",)): if "COLR" in tableTags and "COLR" in font: # The merger modifies the destination COLR table in-place. If this contains # multiple PaintColrLayers referencing the same layers from LayerList, it's # a problem because we may risk modifying the same paint more than once, or # worse, fail while attempting to do that. # We don't know whether the master COLR table was built with layer reuse # disabled, thus to be safe we rebuild its LayerList so that it contains only # unique layers referenced from non-overlapping PaintColrLayers throughout # the base paint graphs. self.expandPaintColrLayers(font["COLR"].table) VariationMerger.mergeTables(self, font, master_ttfs, tableTags) def checkFormatEnum(self, out, lst, validate=lambda _: True): fmt = out.Format formatEnum = out.formatEnum ok = False try: fmt = formatEnum(fmt) except ValueError: pass else: ok = validate(fmt) if not ok: raise UnsupportedFormat(self, subtable=type(out).__name__, value=fmt) expected = fmt got = [] for v in lst: fmt = getattr(v, "Format", None) try: fmt = formatEnum(fmt) except ValueError: pass got.append(fmt) if not allEqualTo(expected, got): raise InconsistentFormats( self, subtable=type(out).__name__, expected=expected, got=got, ) return expected def mergeSparseDict(self, out, lst): for k in out.keys(): try: self.mergeThings(out[k], [v.get(k) for v in lst]) except VarLibMergeError as e: e.stack.append(f"[{k!r}]") raise def mergeAttrs(self, out, lst, attrs): for attr in attrs: value = getattr(out, attr) values = [getattr(item, attr) for item in lst] try: self.mergeThings(value, values) except VarLibMergeError as e: e.stack.append(f".{attr}") raise def storeMastersForAttr(self, out, lst, attr): master_values = [getattr(item, attr) for item in lst] # VarStore treats deltas for fixed-size floats as integers, so we # must convert master values to int before storing them in the builder # then back to float. 
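# --- Illustrative aside (added sketch, not from the fontTools sources): the
# round-trip the comment above describes. VarStore deltas are integers, so a
# fixed-point float attribute is converted to its integer representation
# before storing, then back afterwards; e.g. for F2Dot14:
def _demo_fixed_point_roundtrip():
    from fontTools.misc.fixedTools import fixedToFloat, floatToFixed

    as_int = floatToFixed(0.5, precisionBits=14)
    assert as_int == 8192  # 0.5 * 2**14
    assert fixedToFloat(as_int, precisionBits=14) == 0.5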
is_fixed_size_float = False conv = out.getConverterByName(attr) if isinstance(conv, BaseFixedValue): is_fixed_size_float = True master_values = [conv.toInt(v) for v in master_values] baseValue = master_values[0] varIdx = ot.NO_VARIATION_INDEX if not allEqual(master_values): baseValue, varIdx = self.store_builder.storeMasters(master_values) if is_fixed_size_float: baseValue = conv.fromInt(baseValue) return baseValue, varIdx def storeVariationIndices(self, varIdxes) -> int: # try to reuse an existing VarIndexBase for the same varIdxes, or else # create a new one key = tuple(varIdxes) varIndexBase = self.varIndexCache.get(key) if varIndexBase is None: # scan for a full match anywhere in the self.varIdxes for i in range(len(self.varIdxes) - len(varIdxes) + 1): if self.varIdxes[i : i + len(varIdxes)] == varIdxes: self.varIndexCache[key] = varIndexBase = i break if varIndexBase is None: # try find a partial match at the end of the self.varIdxes for n in range(len(varIdxes) - 1, 0, -1): if self.varIdxes[-n:] == varIdxes[:n]: varIndexBase = len(self.varIdxes) - n self.varIndexCache[key] = varIndexBase self.varIdxes.extend(varIdxes[n:]) break if varIndexBase is None: # no match found, append at the end self.varIndexCache[key] = varIndexBase = len(self.varIdxes) self.varIdxes.extend(varIdxes) return varIndexBase def mergeVariableAttrs(self, out, lst, attrs) -> int: varIndexBase = ot.NO_VARIATION_INDEX varIdxes = [] for attr in attrs: baseValue, varIdx = self.storeMastersForAttr(out, lst, attr) setattr(out, attr, baseValue) varIdxes.append(varIdx) if any(v != ot.NO_VARIATION_INDEX for v in varIdxes): varIndexBase = self.storeVariationIndices(varIdxes) return varIndexBase @classmethod def convertSubTablesToVarType(cls, table): for path in dfs_base_table( table, skip_root=True, predicate=lambda path: ( getattr(type(path[-1].value), "VarType", None) is not None ), ): st = path[-1] subTable = st.value varType = type(subTable).VarType newSubTable = varType() newSubTable.__dict__.update(subTable.__dict__) newSubTable.populateDefaults() parent = path[-2].value if st.index is not None: getattr(parent, st.name)[st.index] = newSubTable else: setattr(parent, st.name, newSubTable) @staticmethod def expandPaintColrLayers(colr): """Rebuild LayerList without PaintColrLayers reuse. Each base paint graph is fully DFS-traversed (with exception of PaintColrGlyph which are irrelevant for this); any layers referenced via PaintColrLayers are collected into a new LayerList and duplicated when reuse is detected, to ensure that all paints are distinct objects at the end of the process. PaintColrLayers's FirstLayerIndex/NumLayers are updated so that no overlap is left. Also, any consecutively nested PaintColrLayers are flattened. The COLR table's LayerList is replaced with the new unique layers. A side effect is also that any layer from the old LayerList which is not referenced by any PaintColrLayers is dropped. 
""" if not colr.LayerList: # if no LayerList, there's nothing to expand return uniqueLayerIDs = set() newLayerList = [] for rec in colr.BaseGlyphList.BaseGlyphPaintRecord: frontier = [rec.Paint] while frontier: paint = frontier.pop() if paint.Format == ot.PaintFormat.PaintColrGlyph: # don't traverse these, we treat them as constant for merging continue elif paint.Format == ot.PaintFormat.PaintColrLayers: # de-treeify any nested PaintColrLayers, append unique copies to # the new layer list and update PaintColrLayers index/count children = list(_flatten_layers(paint, colr)) first_layer_index = len(newLayerList) for layer in children: if id(layer) in uniqueLayerIDs: layer = copy.deepcopy(layer) assert id(layer) not in uniqueLayerIDs newLayerList.append(layer) uniqueLayerIDs.add(id(layer)) paint.FirstLayerIndex = first_layer_index paint.NumLayers = len(children) else: children = paint.getChildren(colr) frontier.extend(reversed(children)) # sanity check all the new layers are distinct objects assert len(newLayerList) == len(uniqueLayerIDs) colr.LayerList.Paint = newLayerList colr.LayerList.LayerCount = len(newLayerList) @COLRVariationMerger.merger(ot.BaseGlyphList) def merge(merger, self, lst): # ignore BaseGlyphCount, allow sparse glyph sets across masters out = {rec.BaseGlyph: rec for rec in self.BaseGlyphPaintRecord} masters = [{rec.BaseGlyph: rec for rec in m.BaseGlyphPaintRecord} for m in lst] for i, g in enumerate(out.keys()): try: # missing base glyphs don't participate in the merge merger.mergeThings(out[g], [v.get(g) for v in masters]) except VarLibMergeError as e: e.stack.append(f".BaseGlyphPaintRecord[{i}]") e.cause["location"] = f"base glyph {g!r}" raise merger._doneBaseGlyphs = True @COLRVariationMerger.merger(ot.LayerList) def merge(merger, self, lst): # nothing to merge for LayerList, assuming we have already merged all PaintColrLayers # found while traversing the paint graphs rooted at BaseGlyphPaintRecords. assert merger._doneBaseGlyphs, "BaseGlyphList must be merged before LayerList" # Simply flush the final list of layers and go home. self.LayerCount = len(merger.layers) self.Paint = merger.layers def _flatten_layers(root, colr): assert root.Format == ot.PaintFormat.PaintColrLayers for paint in root.getChildren(colr): if paint.Format == ot.PaintFormat.PaintColrLayers: yield from _flatten_layers(paint, colr) else: yield paint def _merge_PaintColrLayers(self, out, lst): # we only enforce that the (flat) number of layers is the same across all masters # but we allow FirstLayerIndex to differ to acommodate for sparse glyph sets. out_layers = list(_flatten_layers(out, self.font["COLR"].table)) # sanity check ttfs are subset to current values (see VariationMerger.mergeThings) # before matching each master PaintColrLayers to its respective COLR by position assert len(self.ttfs) == len(lst) master_layerses = [ list(_flatten_layers(lst[i], self.ttfs[i]["COLR"].table)) for i in range(len(lst)) ] try: self.mergeLists(out_layers, master_layerses) except VarLibMergeError as e: # NOTE: This attribute doesn't actually exist in PaintColrLayers but it's # handy to have it in the stack trace for debugging. e.stack.append(".Layers") raise # following block is very similar to LayerListBuilder._beforeBuildPaintColrLayers # but I couldn't find a nice way to share the code between the two... 
if self.layerReuseCache is not None: # successful reuse can make the list smaller out_layers = self.layerReuseCache.try_reuse(out_layers) # if the list is still too big we need to tree-fy it is_tree = len(out_layers) > MAX_PAINT_COLR_LAYER_COUNT out_layers = build_n_ary_tree(out_layers, n=MAX_PAINT_COLR_LAYER_COUNT) # We now have a tree of sequences with Paint leaves. # Convert the sequences into PaintColrLayers. def listToColrLayers(paint): if isinstance(paint, list): layers = [listToColrLayers(l) for l in paint] paint = ot.Paint() paint.Format = int(ot.PaintFormat.PaintColrLayers) paint.NumLayers = len(layers) paint.FirstLayerIndex = len(self.layers) self.layers.extend(layers) if self.layerReuseCache is not None: self.layerReuseCache.add(layers, paint.FirstLayerIndex) return paint out_layers = [listToColrLayers(l) for l in out_layers] if len(out_layers) == 1 and out_layers[0].Format == ot.PaintFormat.PaintColrLayers: # special case when the reuse cache finds a single perfect PaintColrLayers match # (it can only come from a successful reuse, _flatten_layers has gotten rid of # all nested PaintColrLayers already); we assign it directly and avoid creating # an extra table out.NumLayers = out_layers[0].NumLayers out.FirstLayerIndex = out_layers[0].FirstLayerIndex else: out.NumLayers = len(out_layers) out.FirstLayerIndex = len(self.layers) self.layers.extend(out_layers) # Register our parts for reuse provided we aren't a tree # If we are a tree the leaves registered for reuse and that will suffice if self.layerReuseCache is not None and not is_tree: self.layerReuseCache.add(out_layers, out.FirstLayerIndex) @COLRVariationMerger.merger((ot.Paint, ot.ClipBox)) def merge(merger, self, lst): fmt = merger.checkFormatEnum(self, lst, lambda fmt: not fmt.is_variable()) if fmt is ot.PaintFormat.PaintColrLayers: _merge_PaintColrLayers(merger, self, lst) return varFormat = fmt.as_variable() varAttrs = () if varFormat is not None: varAttrs = otBase.getVariableAttrs(type(self), varFormat) staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs) merger.mergeAttrs(self, lst, staticAttrs) varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs) subTables = [st.value for st in self.iterSubTables()] # Convert table to variable if itself has variations or any subtables have isVariable = varIndexBase != ot.NO_VARIATION_INDEX or any( id(table) in merger.varTableIds for table in subTables ) if isVariable: if varAttrs: # Some PaintVar* don't have any scalar attributes that can vary, # only indirect offsets to other variable subtables, thus have # no VarIndexBase of their own (e.g. PaintVarTransform) self.VarIndexBase = varIndexBase if subTables: # Convert Affine2x3 -> VarAffine2x3, ColorLine -> VarColorLine, etc. 
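# --- Illustrative aside (added sketch, not from the fontTools sources): the
# kind of type swap convertSubTablesToVarType performs, shown with dummy
# classes. The Var* replacement adopts the old table's __dict__ (the real code
# then also calls populateDefaults() to fill in the extra variable fields).
def _demo_var_type_swap():
    class Affine2x3:  # dummy stand-in, not the real otTables class
        pass

    class VarAffine2x3:  # dummy stand-in for its variable counterpart
        pass

    Affine2x3.VarType = VarAffine2x3
    old = Affine2x3()
    old.xx = 1.0
    new = type(old).VarType()  # same steps as in convertSubTablesToVarType
    new.__dict__.update(old.__dict__)
    assert isinstance(new, VarAffine2x3) and new.xx == 1.0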
merger.convertSubTablesToVarType(self) assert varFormat is not None self.Format = int(varFormat) @COLRVariationMerger.merger((ot.Affine2x3, ot.ColorStop)) def merge(merger, self, lst): varType = type(self).VarType varAttrs = otBase.getVariableAttrs(varType) staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs) merger.mergeAttrs(self, lst, staticAttrs) varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs) if varIndexBase != ot.NO_VARIATION_INDEX: self.VarIndexBase = varIndexBase # mark as having variations so the parent table will convert to Var{Type} merger.varTableIds.add(id(self)) @COLRVariationMerger.merger(ot.ColorLine) def merge(merger, self, lst): merger.mergeAttrs(self, lst, (c.name for c in self.getConverters())) if any(id(stop) in merger.varTableIds for stop in self.ColorStop): merger.convertSubTablesToVarType(self) merger.varTableIds.add(id(self)) @COLRVariationMerger.merger(ot.ClipList, "clips") def merge(merger, self, lst): # 'sparse' in that we allow non-default masters to omit ClipBox entries # for some/all glyphs (i.e. they don't participate) merger.mergeSparseDict(self, lst) PKaZZZ���y�U�UfontTools/varLib/models.py"""Variation fonts interpolation models.""" __all__ = [ "normalizeValue", "normalizeLocation", "supportScalar", "piecewiseLinearMap", "VariationModel", ] from fontTools.misc.roundTools import noRound from .errors import VariationModelError def nonNone(lst): return [l for l in lst if l is not None] def allNone(lst): return all(l is None for l in lst) def allEqualTo(ref, lst, mapper=None): if mapper is None: return all(ref == item for item in lst) mapped = mapper(ref) return all(mapped == mapper(item) for item in lst) def allEqual(lst, mapper=None): if not lst: return True it = iter(lst) try: first = next(it) except StopIteration: return True return allEqualTo(first, it, mapper=mapper) def subList(truth, lst): assert len(truth) == len(lst) return [l for l, t in zip(lst, truth) if t] def normalizeValue(v, triple, extrapolate=False): """Normalizes value based on a min/default/max triple. >>> normalizeValue(400, (100, 400, 900)) 0.0 >>> normalizeValue(100, (100, 400, 900)) -1.0 >>> normalizeValue(650, (100, 400, 900)) 0.5 """ lower, default, upper = triple if not (lower <= default <= upper): raise ValueError( f"Invalid axis values, must be minimum, default, maximum: " f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}" ) if not extrapolate: v = max(min(v, upper), lower) if v == default or lower == upper: return 0.0 if (v < default and lower != default) or (v > default and upper == default): return (v - default) / (default - lower) else: assert (v > default and upper != default) or ( v < default and lower == default ), f"Ooops... v={v}, triple=({lower}, {default}, {upper})" return (v - default) / (upper - default) def normalizeLocation(location, axes, extrapolate=False): """Normalizes location based on axis min/default/max values from axes. 
>>> axes = {"wght": (100, 400, 900)} >>> normalizeLocation({"wght": 400}, axes) {'wght': 0.0} >>> normalizeLocation({"wght": 100}, axes) {'wght': -1.0} >>> normalizeLocation({"wght": 900}, axes) {'wght': 1.0} >>> normalizeLocation({"wght": 650}, axes) {'wght': 0.5} >>> normalizeLocation({"wght": 1000}, axes) {'wght': 1.0} >>> normalizeLocation({"wght": 0}, axes) {'wght': -1.0} >>> axes = {"wght": (0, 0, 1000)} >>> normalizeLocation({"wght": 0}, axes) {'wght': 0.0} >>> normalizeLocation({"wght": -1}, axes) {'wght': 0.0} >>> normalizeLocation({"wght": 1000}, axes) {'wght': 1.0} >>> normalizeLocation({"wght": 500}, axes) {'wght': 0.5} >>> normalizeLocation({"wght": 1001}, axes) {'wght': 1.0} >>> axes = {"wght": (0, 1000, 1000)} >>> normalizeLocation({"wght": 0}, axes) {'wght': -1.0} >>> normalizeLocation({"wght": -1}, axes) {'wght': -1.0} >>> normalizeLocation({"wght": 500}, axes) {'wght': -0.5} >>> normalizeLocation({"wght": 1000}, axes) {'wght': 0.0} >>> normalizeLocation({"wght": 1001}, axes) {'wght': 0.0} """ out = {} for tag, triple in axes.items(): v = location.get(tag, triple[1]) out[tag] = normalizeValue(v, triple, extrapolate=extrapolate) return out def supportScalar(location, support, ot=True, extrapolate=False, axisRanges=None): """Returns the scalar multiplier at location, for a master with support. If ot is True, then a peak value of zero for support of an axis means "axis does not participate". That is how OpenType Variation Font technology works. If extrapolate is True, axisRanges must be a dict that maps axis names to (axisMin, axisMax) tuples. >>> supportScalar({}, {}) 1.0 >>> supportScalar({'wght':.2}, {}) 1.0 >>> supportScalar({'wght':.2}, {'wght':(0,2,3)}) 0.1 >>> supportScalar({'wght':2.5}, {'wght':(0,2,4)}) 0.75 >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) 0.75 >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False) 0.375 >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) 0.75 >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) 0.75 >>> supportScalar({'wght':3}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)}) -1.0 >>> supportScalar({'wght':-1}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)}) -1.0 >>> supportScalar({'wght':3}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)}) 1.5 >>> supportScalar({'wght':-1}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)}) -0.5 """ if extrapolate and axisRanges is None: raise TypeError("axisRanges must be passed when extrapolate is True") scalar = 1.0 for axis, (lower, peak, upper) in support.items(): if ot: # OpenType-specific case handling if peak == 0.0: continue if lower > peak or peak > upper: continue if lower < 0.0 and upper > 0.0: continue v = location.get(axis, 0.0) else: assert axis in location v = location[axis] if v == peak: continue if extrapolate: axisMin, axisMax = axisRanges[axis] if v < axisMin and lower <= axisMin: if peak <= axisMin and peak < upper: scalar *= (v - upper) / (peak - upper) continue elif axisMin < peak: scalar *= (v - lower) / (peak - lower) continue elif axisMax < v and axisMax <= upper: if axisMax <= peak and lower < peak: scalar *= (v - lower) / (peak - lower) continue elif peak < axisMax: scalar *= (v - upper) / (peak - upper) continue if v <= lower or upper <= v: scalar = 0.0 break if v < peak: scalar *= (v - lower) / (peak - lower) else: # v > peak scalar *= (v - upper) / (peak - upper) return scalar class 
VariationModel(object): """Locations must have the base master at the origin (ie. 0). If the extrapolate argument is set to True, then values are extrapolated outside the axis range. >>> from pprint import pprint >>> locations = [ \ {'wght':100}, \ {'wght':-100}, \ {'wght':-180}, \ {'wdth':+.3}, \ {'wght':+120,'wdth':.3}, \ {'wght':+120,'wdth':.2}, \ {}, \ {'wght':+180,'wdth':.3}, \ {'wght':+180}, \ ] >>> model = VariationModel(locations, axisOrder=['wght']) >>> pprint(model.locations) [{}, {'wght': -100}, {'wght': -180}, {'wght': 100}, {'wght': 180}, {'wdth': 0.3}, {'wdth': 0.3, 'wght': 180}, {'wdth': 0.3, 'wght': 120}, {'wdth': 0.2, 'wght': 120}] >>> pprint(model.deltaWeights) [{}, {0: 1.0}, {0: 1.0}, {0: 1.0}, {0: 1.0}, {0: 1.0}, {0: 1.0, 4: 1.0, 5: 1.0}, {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.6666666666666666}, {0: 1.0, 3: 0.75, 4: 0.25, 5: 0.6666666666666667, 6: 0.4444444444444445, 7: 0.6666666666666667}] """ def __init__(self, locations, axisOrder=None, extrapolate=False): if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations): raise VariationModelError("Locations must be unique.") self.origLocations = locations self.axisOrder = axisOrder if axisOrder is not None else [] self.extrapolate = extrapolate self.axisRanges = self.computeAxisRanges(locations) if extrapolate else None locations = [{k: v for k, v in loc.items() if v != 0.0} for loc in locations] keyFunc = self.getMasterLocationsSortKeyFunc( locations, axisOrder=self.axisOrder ) self.locations = sorted(locations, key=keyFunc) # Mapping from user's master order to our master order self.mapping = [self.locations.index(l) for l in locations] self.reverseMapping = [locations.index(l) for l in self.locations] self._computeMasterSupports() self._subModels = {} def getSubModel(self, items): """Return a sub-model and the items that are not None. The sub-model is necessary for working with the subset of items when some are None. 
The sub-model is cached.""" if None not in items: return self, items key = tuple(v is not None for v in items) subModel = self._subModels.get(key) if subModel is None: subModel = VariationModel(subList(key, self.origLocations), self.axisOrder) self._subModels[key] = subModel return subModel, subList(key, items) @staticmethod def computeAxisRanges(locations): axisRanges = {} allAxes = {axis for loc in locations for axis in loc.keys()} for loc in locations: for axis in allAxes: value = loc.get(axis, 0) axisMin, axisMax = axisRanges.get(axis, (value, value)) axisRanges[axis] = min(value, axisMin), max(value, axisMax) return axisRanges @staticmethod def getMasterLocationsSortKeyFunc(locations, axisOrder=[]): if {} not in locations: raise VariationModelError("Base master not found.") axisPoints = {} for loc in locations: if len(loc) != 1: continue axis = next(iter(loc)) value = loc[axis] if axis not in axisPoints: axisPoints[axis] = {0.0} assert ( value not in axisPoints[axis] ), 'Value "%s" in axisPoints["%s"] --> %s' % (value, axis, axisPoints) axisPoints[axis].add(value) def getKey(axisPoints, axisOrder): def sign(v): return -1 if v < 0 else +1 if v > 0 else 0 def key(loc): rank = len(loc) onPointAxes = [ axis for axis, value in loc.items() if axis in axisPoints and value in axisPoints[axis] ] orderedAxes = [axis for axis in axisOrder if axis in loc] orderedAxes.extend( [axis for axis in sorted(loc.keys()) if axis not in axisOrder] ) return ( rank, # First, order by increasing rank -len(onPointAxes), # Next, by decreasing number of onPoint axes tuple( axisOrder.index(axis) if axis in axisOrder else 0x10000 for axis in orderedAxes ), # Next, by known axes tuple(orderedAxes), # Next, by all axes tuple( sign(loc[axis]) for axis in orderedAxes ), # Next, by signs of axis values tuple( abs(loc[axis]) for axis in orderedAxes ), # Next, by absolute value of axis values ) return key ret = getKey(axisPoints, axisOrder) return ret def reorderMasters(self, master_list, mapping): # For changing the master data order without # recomputing supports and deltaWeights. new_list = [master_list[idx] for idx in mapping] self.origLocations = [self.origLocations[idx] for idx in mapping] locations = [ {k: v for k, v in loc.items() if v != 0.0} for loc in self.origLocations ] self.mapping = [self.locations.index(l) for l in locations] self.reverseMapping = [locations.index(l) for l in self.locations] self._subModels = {} return new_list def _computeMasterSupports(self): self.supports = [] regions = self._locationsToRegions() for i, region in enumerate(regions): locAxes = set(region.keys()) # Walk over previous masters now for prev_region in regions[:i]: # Masters with extra axes do not participate if set(prev_region.keys()) != locAxes: continue # If it's NOT in the current box, it does not participate relevant = True for axis, (lower, peak, upper) in region.items(): if not ( prev_region[axis][1] == peak or lower < prev_region[axis][1] < upper ): relevant = False break if not relevant: continue # Split the box for new master; split in whatever direction # has the largest range ratio. # # For symmetry, we actually cut across multiple axes # if they have the largest, equal, ratio.
# https://github.com/fonttools/fonttools/commit/7ee81c8821671157968b097f3e55309a1faa511e#commitcomment-31054804 bestAxes = {} bestRatio = -1 for axis in prev_region.keys(): val = prev_region[axis][1] assert axis in region lower, locV, upper = region[axis] newLower, newUpper = lower, upper if val < locV: newLower = val ratio = (val - locV) / (lower - locV) elif locV < val: newUpper = val ratio = (val - locV) / (upper - locV) else: # val == locV # Can't split box in this direction. continue if ratio > bestRatio: bestAxes = {} bestRatio = ratio if ratio == bestRatio: bestAxes[axis] = (newLower, locV, newUpper) for axis, triple in bestAxes.items(): region[axis] = triple self.supports.append(region) self._computeDeltaWeights() def _locationsToRegions(self): locations = self.locations # Compute min/max across each axis, use it as total range. # TODO Take this as input from outside? minV = {} maxV = {} for l in locations: for k, v in l.items(): minV[k] = min(v, minV.get(k, v)) maxV[k] = max(v, maxV.get(k, v)) regions = [] for loc in locations: region = {} for axis, locV in loc.items(): if locV > 0: region[axis] = (0, locV, maxV[axis]) else: region[axis] = (minV[axis], locV, 0) regions.append(region) return regions def _computeDeltaWeights(self): self.deltaWeights = [] for i, loc in enumerate(self.locations): deltaWeight = {} # Walk over previous masters now, populate deltaWeight for j, support in enumerate(self.supports[:i]): scalar = supportScalar(loc, support) if scalar: deltaWeight[j] = scalar self.deltaWeights.append(deltaWeight) def getDeltas(self, masterValues, *, round=noRound): assert len(masterValues) == len(self.deltaWeights) mapping = self.reverseMapping out = [] for i, weights in enumerate(self.deltaWeights): delta = masterValues[mapping[i]] for j, weight in weights.items(): if weight == 1: delta -= out[j] else: delta -= out[j] * weight out.append(round(delta)) return out def getDeltasAndSupports(self, items, *, round=noRound): model, items = self.getSubModel(items) return model.getDeltas(items, round=round), model.supports def getScalars(self, loc): """Return scalars for each delta, for the given location. If interpolating many master-values at the same location, this function allows speed up by fetching the scalars once and using them with interpolateFromMastersAndScalars().""" return [ supportScalar( loc, support, extrapolate=self.extrapolate, axisRanges=self.axisRanges ) for support in self.supports ] def getMasterScalars(self, targetLocation): """Return multipliers for each master, for the given location. If interpolating many master-values at the same location, this function allows speed up by fetching the scalars once and using them with interpolateFromValuesAndScalars(). Note that the scalars used in interpolateFromMastersAndScalars(), are *not* the same as the ones returned here. They are the result of getScalars().""" out = self.getScalars(targetLocation) for i, weights in reversed(list(enumerate(self.deltaWeights))): for j, weight in weights.items(): out[j] -= out[i] * weight out = [out[self.mapping[i]] for i in range(len(out))] return out @staticmethod def interpolateFromValuesAndScalars(values, scalars): """Interpolate from values and scalars coefficients. If the values are master-values, then the scalars should be fetched from getMasterScalars(). If the values are deltas, then the scalars should be fetched from getScalars(); in which case this is the same as interpolateFromDeltasAndScalars(). 
""" v = None assert len(values) == len(scalars) for value, scalar in zip(values, scalars): if not scalar: continue contribution = value * scalar if v is None: v = contribution else: v += contribution return v @staticmethod def interpolateFromDeltasAndScalars(deltas, scalars): """Interpolate from deltas and scalars fetched from getScalars().""" return VariationModel.interpolateFromValuesAndScalars(deltas, scalars) def interpolateFromDeltas(self, loc, deltas): """Interpolate from deltas, at location loc.""" scalars = self.getScalars(loc) return self.interpolateFromDeltasAndScalars(deltas, scalars) def interpolateFromMasters(self, loc, masterValues, *, round=noRound): """Interpolate from master-values, at location loc.""" scalars = self.getMasterScalars(loc) return self.interpolateFromValuesAndScalars(masterValues, scalars) def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound): """Interpolate from master-values, and scalars fetched from getScalars(), which is useful when you want to interpolate multiple master-values with the same location.""" deltas = self.getDeltas(masterValues, round=round) return self.interpolateFromDeltasAndScalars(deltas, scalars) def piecewiseLinearMap(v, mapping): keys = mapping.keys() if not keys: return v if v in keys: return mapping[v] k = min(keys) if v < k: return v + mapping[k] - k k = max(keys) if v > k: return v + mapping[k] - k # Interpolate a = max(k for k in keys if k < v) b = min(k for k in keys if k > v) va = mapping[a] vb = mapping[b] return va + (vb - va) * (v - a) / (b - a) def main(args=None): """Normalize locations on a given designspace""" from fontTools import configLogger import argparse parser = argparse.ArgumentParser( "fonttools varLib.models", description=main.__doc__, ) parser.add_argument( "--loglevel", metavar="LEVEL", default="INFO", help="Logging level (defaults to INFO)", ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument("-d", "--designspace", metavar="DESIGNSPACE", type=str) group.add_argument( "-l", "--locations", metavar="LOCATION", nargs="+", help="Master locations as comma-separate coordinates. One must be all zeros.", ) args = parser.parse_args(args) configLogger(level=args.loglevel) from pprint import pprint if args.designspace: from fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument() doc.read(args.designspace) locs = [s.location for s in doc.sources] print("Original locations:") pprint(locs) doc.normalize() print("Normalized locations:") locs = [s.location for s in doc.sources] pprint(locs) else: axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)] locs = [ dict(zip(axes, (float(v) for v in s.split(",")))) for s in args.locations ] model = VariationModel(locs) print("Sorted locations:") pprint(model.locations) print("Supports:") pprint(model.supports) if __name__ == "__main__": import doctest, sys if len(sys.argv) > 1: sys.exit(main()) sys.exit(doctest.testmod().failed) PKaZZZx#SXKKfontTools/varLib/mutator.py""" Instantiate a variation font. 
Run, eg: $ fonttools varLib.mutator ./NotoSansArabic-VF.ttf wght=140 wdth=85 """ from fontTools.misc.fixedTools import floatToFixedToFloat, floatToFixed from fontTools.misc.roundTools import otRound from fontTools.pens.boundsPen import BoundsPen from fontTools.ttLib import TTFont, newTable from fontTools.ttLib.tables import ttProgram from fontTools.ttLib.tables._g_l_y_f import ( GlyphCoordinates, flagOverlapSimple, OVERLAP_COMPOUND, ) from fontTools.varLib.models import ( supportScalar, normalizeLocation, piecewiseLinearMap, ) from fontTools.varLib.merger import MutatorMerger from fontTools.varLib.varStore import VarStoreInstancer from fontTools.varLib.mvar import MVAR_ENTRIES from fontTools.varLib.iup import iup_delta import fontTools.subset.cff import os.path import logging from io import BytesIO log = logging.getLogger("fontTools.varlib.mutator") # map 'wdth' axis (1..200) to OS/2.usWidthClass (1..9), rounding to closest OS2_WIDTH_CLASS_VALUES = {} percents = [50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 150.0, 200.0] for i, (prev, curr) in enumerate(zip(percents[:-1], percents[1:]), start=1): half = (prev + curr) / 2 OS2_WIDTH_CLASS_VALUES[half] = i def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas): pd_blend_lists = ( "BlueValues", "OtherBlues", "FamilyBlues", "FamilyOtherBlues", "StemSnapH", "StemSnapV", ) pd_blend_values = ("BlueScale", "BlueShift", "BlueFuzz", "StdHW", "StdVW") for fontDict in topDict.FDArray: pd = fontDict.Private vsindex = pd.vsindex if (hasattr(pd, "vsindex")) else 0 for key, value in pd.rawDict.items(): if (key in pd_blend_values) and isinstance(value, list): delta = interpolateFromDeltas(vsindex, value[1:]) pd.rawDict[key] = otRound(value[0] + delta) elif (key in pd_blend_lists) and isinstance(value[0], list): """If any argument in a BlueValues list is a blend list, then they all are. The first value of each list is an absolute value. The delta tuples are calculated from relative master values, hence we need to append all the deltas to date to each successive absolute value.""" delta = 0 for i, val_list in enumerate(value): delta += otRound(interpolateFromDeltas(vsindex, val_list[1:])) value[i] = val_list[0] + delta def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder): charstrings = topDict.CharStrings for gname in glyphOrder: # Interpolate charstring # e.g replace blend op args with regular args, # and use and discard vsindex op. 
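# --- Illustrative aside (added sketch, not from the fontTools sources): the
# stack shape the loop below decodes. For one blended operand over two extra
# regions, the charstring carries the default value, its per-region deltas,
# the blended-arg count, then 'blend'; instancing replaces all of that with
# default + sum(delta * region_scalar). The numbers here are made up.
def _demo_blend_operand():
    default, deltas, scalars = 100, (10, -20), (0.5, 0.25)
    program = [default, *deltas, 1, "blend"]  # 1 == number of blended args
    interpolated = default + sum(d * s for d, s in zip(deltas, scalars))
    assert interpolated == 100  # 100 + 10*0.5 + (-20)*0.25
    return program, interpolated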
charstring = charstrings[gname] new_program = [] vsindex = 0 last_i = 0 for i, token in enumerate(charstring.program): if token == "vsindex": vsindex = charstring.program[i - 1] if last_i != 0: new_program.extend(charstring.program[last_i : i - 1]) last_i = i + 1 elif token == "blend": num_regions = charstring.getNumRegions(vsindex) numMasters = 1 + num_regions num_args = charstring.program[i - 1] # The program list starting at program[i] is now: # ..args for following operations # num_args values from the default font # num_args tuples, each with numMasters-1 delta values # num_blend_args # 'blend' argi = i - (num_args * numMasters + 1) end_args = tuplei = argi + num_args while argi < end_args: next_ti = tuplei + num_regions deltas = charstring.program[tuplei:next_ti] delta = interpolateFromDeltas(vsindex, deltas) charstring.program[argi] += otRound(delta) tuplei = next_ti argi += 1 new_program.extend(charstring.program[last_i:end_args]) last_i = i + 1 if last_i != 0: new_program.extend(charstring.program[last_i:]) charstring.program = new_program def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc): """Unlike TrueType glyphs, neither advance width nor bounding box info is stored in a CFF2 charstring. The width data exists only in the hmtx and HVAR tables. Since LSB data cannot be interpolated reliably from the master LSB values in the hmtx table, we traverse the charstring to determine the actual bound box.""" charstrings = topDict.CharStrings boundsPen = BoundsPen(glyphOrder) hmtx = varfont["hmtx"] hvar_table = None if "HVAR" in varfont: hvar_table = varfont["HVAR"].table fvar = varfont["fvar"] varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc) for gid, gname in enumerate(glyphOrder): entry = list(hmtx[gname]) # get width delta. if hvar_table: if hvar_table.AdvWidthMap: width_idx = hvar_table.AdvWidthMap.mapping[gname] else: width_idx = gid width_delta = otRound(varStoreInstancer[width_idx]) else: width_delta = 0 # get LSB. boundsPen.init() charstring = charstrings[gname] charstring.draw(boundsPen) if boundsPen.bounds is None: # Happens with non-marking glyphs lsb_delta = 0 else: lsb = otRound(boundsPen.bounds[0]) lsb_delta = entry[1] - lsb if lsb_delta or width_delta: if width_delta: entry[0] = max(0, entry[0] + width_delta) if lsb_delta: entry[1] = lsb hmtx[gname] = tuple(entry) def instantiateVariableFont(varfont, location, inplace=False, overlap=True): """Generate a static instance from a variable TTFont and a dictionary defining the desired location along the variable font's axes. The location values must be specified as user-space coordinates, e.g.: {'wght': 400, 'wdth': 100} By default, a new TTFont object is returned. If ``inplace`` is True, the input varfont is modified and reduced to a static font. When the overlap parameter is defined as True, OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1. See https://docs.microsoft.com/en-us/typography/opentype/spec/glyf """ if not inplace: # make a copy to leave input varfont unmodified stream = BytesIO() varfont.save(stream) stream.seek(0) varfont = TTFont(stream) fvar = varfont["fvar"] axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes} loc = normalizeLocation(location, axes) if "avar" in varfont: maps = varfont["avar"].segments loc = {k: piecewiseLinearMap(v, maps[k]) for k, v in loc.items()} # Quantize to F2Dot14, to avoid surprise interpolations. 
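# (Hedged aside: floatToFixedToFloat(), imported at the top of this file,
# rounds a float to the nearest multiple of 1/16384, i.e. the F2Dot14 value
# that would actually be stored in a binary font table, so the normalized
# location used below matches what consumers of the font will compute.)
assert floatToFixedToFloat(0.3, 14) == 4915 / 16384  # ~0.2999878, nearest F2Dot14
assert floatToFixedToFloat(0.5, 14) == 0.5  # exactly representable, unchanged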
loc = {k: floatToFixedToFloat(v, 14) for k, v in loc.items()} # Location is normalized now log.info("Normalized location: %s", loc) if "gvar" in varfont: log.info("Mutating glyf/gvar tables") gvar = varfont["gvar"] glyf = varfont["glyf"] hMetrics = varfont["hmtx"].metrics vMetrics = getattr(varfont.get("vmtx"), "metrics", None) # get list of glyph names in gvar sorted by component depth glyphnames = sorted( gvar.variations.keys(), key=lambda name: ( ( glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth if glyf[name].isComposite() or glyf[name].isVarComposite() else 0 ), name, ), ) for glyphname in glyphnames: variations = gvar.variations[glyphname] coordinates, _ = glyf._getCoordinatesAndControls( glyphname, hMetrics, vMetrics ) origCoords, endPts = None, None for var in variations: scalar = supportScalar(loc, var.axes) if not scalar: continue delta = var.coordinates if None in delta: if origCoords is None: origCoords, g = glyf._getCoordinatesAndControls( glyphname, hMetrics, vMetrics ) delta = iup_delta(delta, origCoords, g.endPts) coordinates += GlyphCoordinates(delta) * scalar glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics) else: glyf = None if "DSIG" in varfont: del varfont["DSIG"] if "cvar" in varfont: log.info("Mutating cvt/cvar tables") cvar = varfont["cvar"] cvt = varfont["cvt "] deltas = {} for var in cvar.variations: scalar = supportScalar(loc, var.axes) if not scalar: continue for i, c in enumerate(var.coordinates): if c is not None: deltas[i] = deltas.get(i, 0) + scalar * c for i, delta in deltas.items(): cvt[i] += otRound(delta) if "CFF2" in varfont: log.info("Mutating CFF2 table") glyphOrder = varfont.getGlyphOrder() CFF2 = varfont["CFF2"] topDict = CFF2.cff.topDictIndex[0] vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc) interpolateFromDeltas = vsInstancer.interpolateFromDeltas interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas) CFF2.desubroutinize() interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder) interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc) del topDict.rawDict["VarStore"] del topDict.VarStore if "MVAR" in varfont: log.info("Mutating MVAR table") mvar = varfont["MVAR"].table varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc) records = mvar.ValueRecord for rec in records: mvarTag = rec.ValueTag if mvarTag not in MVAR_ENTRIES: continue tableTag, itemName = MVAR_ENTRIES[mvarTag] delta = otRound(varStoreInstancer[rec.VarIdx]) if not delta: continue setattr( varfont[tableTag], itemName, getattr(varfont[tableTag], itemName) + delta, ) log.info("Mutating FeatureVariations") for tableTag in "GSUB", "GPOS": if not tableTag in varfont: continue table = varfont[tableTag].table if not getattr(table, "FeatureVariations", None): continue variations = table.FeatureVariations for record in variations.FeatureVariationRecord: applies = True for condition in record.ConditionSet.ConditionTable: if condition.Format == 1: axisIdx = condition.AxisIndex axisTag = fvar.axes[axisIdx].axisTag Min = condition.FilterRangeMinValue Max = condition.FilterRangeMaxValue v = loc[axisTag] if not (Min <= v <= Max): applies = False else: applies = False if not applies: break if applies: assert record.FeatureTableSubstitution.Version == 0x00010000 for rec in record.FeatureTableSubstitution.SubstitutionRecord: table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = ( rec.Feature ) break del table.FeatureVariations if "GDEF" in varfont and varfont["GDEF"].table.Version >= 0x00010003: 
log.info("Mutating GDEF/GPOS/GSUB tables") gdef = varfont["GDEF"].table instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc) merger = MutatorMerger(varfont, instancer) merger.mergeTables(varfont, [varfont], ["GDEF", "GPOS"]) # Downgrade GDEF. del gdef.VarStore gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef is None: del gdef.MarkGlyphSetsDef gdef.Version = 0x00010000 if not ( gdef.LigCaretList or gdef.MarkAttachClassDef or gdef.GlyphClassDef or gdef.AttachList or (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef) ): del varfont["GDEF"] addidef = False if glyf: for glyph in glyf.glyphs.values(): if hasattr(glyph, "program"): instructions = glyph.program.getAssembly() # If GETVARIATION opcode is used in bytecode of any glyph add IDEF addidef = any(op.startswith("GETVARIATION") for op in instructions) if addidef: break if overlap: for glyph_name in glyf.keys(): glyph = glyf[glyph_name] # Set OVERLAP_COMPOUND bit for compound glyphs if glyph.isComposite(): glyph.components[0].flags |= OVERLAP_COMPOUND # Set OVERLAP_SIMPLE bit for simple glyphs elif glyph.numberOfContours > 0: glyph.flags[0] |= flagOverlapSimple if addidef: log.info("Adding IDEF to fpgm table for GETVARIATION opcode") asm = [] if "fpgm" in varfont: fpgm = varfont["fpgm"] asm = fpgm.program.getAssembly() else: fpgm = newTable("fpgm") fpgm.program = ttProgram.Program() varfont["fpgm"] = fpgm asm.append("PUSHB[000] 145") asm.append("IDEF[ ]") args = [str(len(loc))] for a in fvar.axes: args.append(str(floatToFixed(loc[a.axisTag], 14))) asm.append("NPUSHW[ ] " + " ".join(args)) asm.append("ENDF[ ]") fpgm.program.fromAssembly(asm) # Change maxp attributes as IDEF is added if "maxp" in varfont: maxp = varfont["maxp"] setattr( maxp, "maxInstructionDefs", 1 + getattr(maxp, "maxInstructionDefs", 0) ) setattr( maxp, "maxStackElements", max(len(loc), getattr(maxp, "maxStackElements", 0)), ) if "name" in varfont: log.info("Pruning name table") exclude = {a.axisNameID for a in fvar.axes} for i in fvar.instances: exclude.add(i.subfamilyNameID) exclude.add(i.postscriptNameID) if "ltag" in varfont: # Drop the whole 'ltag' table if all its language tags are referenced by # name records to be pruned. 
# TODO: prune unused ltag tags and re-enumerate langIDs accordingly excludedUnicodeLangIDs = [ n.langID for n in varfont["name"].names if n.nameID in exclude and n.platformID == 0 and n.langID != 0xFFFF ] if set(excludedUnicodeLangIDs) == set(range(len((varfont["ltag"].tags)))): del varfont["ltag"] varfont["name"].names[:] = [ n for n in varfont["name"].names if n.nameID not in exclude ] if "wght" in location and "OS/2" in varfont: varfont["OS/2"].usWeightClass = otRound(max(1, min(location["wght"], 1000))) if "wdth" in location: wdth = location["wdth"] for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()): if wdth < percent: varfont["OS/2"].usWidthClass = widthClass break else: varfont["OS/2"].usWidthClass = 9 if "slnt" in location and "post" in varfont: varfont["post"].italicAngle = max(-90, min(location["slnt"], 90)) log.info("Removing variable tables") for tag in ("avar", "cvar", "fvar", "gvar", "HVAR", "MVAR", "VVAR", "STAT"): if tag in varfont: del varfont[tag] return varfont def main(args=None): """Instantiate a variation font""" from fontTools import configLogger import argparse parser = argparse.ArgumentParser( "fonttools varLib.mutator", description="Instantiate a variable font" ) parser.add_argument("input", metavar="INPUT.ttf", help="Input variable TTF file.") parser.add_argument( "locargs", metavar="AXIS=LOC", nargs="*", help="List of space separated locations. A location consist in " "the name of a variation axis, followed by '=' and a number. E.g.: " " wght=700 wdth=80. The default is the location of the base master.", ) parser.add_argument( "-o", "--output", metavar="OUTPUT.ttf", default=None, help="Output instance TTF file (default: INPUT-instance.ttf).", ) parser.add_argument( "--no-recalc-timestamp", dest="recalc_timestamp", action="store_false", help="Don't set the output font's timestamp to the current time.", ) logging_group = parser.add_mutually_exclusive_group(required=False) logging_group.add_argument( "-v", "--verbose", action="store_true", help="Run more verbosely." ) logging_group.add_argument( "-q", "--quiet", action="store_true", help="Turn verbosity off." 
) parser.add_argument( "--no-overlap", dest="overlap", action="store_false", help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags.", ) options = parser.parse_args(args) varfilename = options.input outfile = ( os.path.splitext(varfilename)[0] + "-instance.ttf" if not options.output else options.output ) configLogger( level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") ) loc = {} for arg in options.locargs: try: tag, val = arg.split("=") assert len(tag) <= 4 loc[tag.ljust(4)] = float(val) except (ValueError, AssertionError): parser.error("invalid location argument format: %r" % arg) log.info("Location: %s", loc) log.info("Loading variable font") varfont = TTFont(varfilename, recalcTimestamp=options.recalc_timestamp) instantiateVariableFont(varfont, loc, inplace=True, overlap=options.overlap) log.info("Saving instance font %s", outfile) varfont.save(outfile) if __name__ == "__main__": import sys if len(sys.argv) > 1: sys.exit(main()) import doctest sys.exit(doctest.testmod().failed) PKaZZZ�J� � fontTools/varLib/mvar.pyMVAR_ENTRIES = { "hasc": ("OS/2", "sTypoAscender"), # horizontal ascender "hdsc": ("OS/2", "sTypoDescender"), # horizontal descender "hlgp": ("OS/2", "sTypoLineGap"), # horizontal line gap "hcla": ("OS/2", "usWinAscent"), # horizontal clipping ascent "hcld": ("OS/2", "usWinDescent"), # horizontal clipping descent "vasc": ("vhea", "ascent"), # vertical ascender "vdsc": ("vhea", "descent"), # vertical descender "vlgp": ("vhea", "lineGap"), # vertical line gap "hcrs": ("hhea", "caretSlopeRise"), # horizontal caret rise "hcrn": ("hhea", "caretSlopeRun"), # horizontal caret run "hcof": ("hhea", "caretOffset"), # horizontal caret offset "vcrs": ("vhea", "caretSlopeRise"), # vertical caret rise "vcrn": ("vhea", "caretSlopeRun"), # vertical caret run "vcof": ("vhea", "caretOffset"), # vertical caret offset "xhgt": ("OS/2", "sxHeight"), # x height "cpht": ("OS/2", "sCapHeight"), # cap height "sbxs": ("OS/2", "ySubscriptXSize"), # subscript em x size "sbys": ("OS/2", "ySubscriptYSize"), # subscript em y size "sbxo": ("OS/2", "ySubscriptXOffset"), # subscript em x offset "sbyo": ("OS/2", "ySubscriptYOffset"), # subscript em y offset "spxs": ("OS/2", "ySuperscriptXSize"), # superscript em x size "spys": ("OS/2", "ySuperscriptYSize"), # superscript em y size "spxo": ("OS/2", "ySuperscriptXOffset"), # superscript em x offset "spyo": ("OS/2", "ySuperscriptYOffset"), # superscript em y offset "strs": ("OS/2", "yStrikeoutSize"), # strikeout size "stro": ("OS/2", "yStrikeoutPosition"), # strikeout offset "unds": ("post", "underlineThickness"), # underline size "undo": ("post", "underlinePosition"), # underline offset #'gsp0': ('gasp', 'gaspRange[0].rangeMaxPPEM'), # gaspRange[0] #'gsp1': ('gasp', 'gaspRange[1].rangeMaxPPEM'), # gaspRange[1] #'gsp2': ('gasp', 'gaspRange[2].rangeMaxPPEM'), # gaspRange[2] #'gsp3': ('gasp', 'gaspRange[3].rangeMaxPPEM'), # gaspRange[3] #'gsp4': ('gasp', 'gaspRange[4].rangeMaxPPEM'), # gaspRange[4] #'gsp5': ('gasp', 'gaspRange[5].rangeMaxPPEM'), # gaspRange[5] #'gsp6': ('gasp', 'gaspRange[6].rangeMaxPPEM'), # gaspRange[6] #'gsp7': ('gasp', 'gaspRange[7].rangeMaxPPEM'), # gaspRange[7] #'gsp8': ('gasp', 'gaspRange[8].rangeMaxPPEM'), # gaspRange[8] #'gsp9': ('gasp', 'gaspRange[9].rangeMaxPPEM'), # gaspRange[9] } PKaZZZ���FFfontTools/varLib/plot.py"""Visualize DesignSpaceDocument and resulting VariationModel.""" from fontTools.varLib.models import VariationModel, supportScalar from fontTools.designspaceLib import DesignSpaceDocument from 
matplotlib import pyplot from mpl_toolkits.mplot3d import axes3d from itertools import cycle import math import logging import sys log = logging.getLogger(__name__) def stops(support, count=10): a, b, c = support return ( [a + (b - a) * i / count for i in range(count)] + [b + (c - b) * i / count for i in range(count)] + [c] ) def _plotLocationsDots(locations, axes, subplot, **kwargs): for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)): if len(axes) == 1: subplot.plot([loc.get(axes[0], 0)], [1.0], "o", color=color, **kwargs) elif len(axes) == 2: subplot.plot( [loc.get(axes[0], 0)], [loc.get(axes[1], 0)], [1.0], "o", color=color, **kwargs, ) else: raise AssertionError(len(axes)) def plotLocations(locations, fig, names=None, **kwargs): n = len(locations) cols = math.ceil(n**0.5) rows = math.ceil(n / cols) if names is None: names = [None] * len(locations) model = VariationModel(locations) names = [names[model.reverseMapping[i]] for i in range(len(names))] axes = sorted(locations[0].keys()) if len(axes) == 1: _plotLocations2D(model, axes[0], fig, cols, rows, names=names, **kwargs) elif len(axes) == 2: _plotLocations3D(model, axes, fig, cols, rows, names=names, **kwargs) else: raise ValueError("Only 1 or 2 axes are supported") def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs): subplot = fig.add_subplot(111) for i, (support, color, name) in enumerate( zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names)) ): if name is not None: subplot.set_title(name) subplot.set_xlabel(axis) pyplot.xlim(-1.0, +1.0) Xs = support.get(axis, (-1.0, 0.0, +1.0)) X, Y = [], [] for x in stops(Xs): y = supportScalar({axis: x}, support) X.append(x) Y.append(y) subplot.plot(X, Y, color=color, **kwargs) _plotLocationsDots(model.locations, [axis], subplot) def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs): ax1, ax2 = axes axis3D = fig.add_subplot(111, projection="3d") for i, (support, color, name) in enumerate( zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names)) ): if name is not None: axis3D.set_title(name) axis3D.set_xlabel(ax1) axis3D.set_ylabel(ax2) pyplot.xlim(-1.0, +1.0) pyplot.ylim(-1.0, +1.0) Xs = support.get(ax1, (-1.0, 0.0, +1.0)) Ys = support.get(ax2, (-1.0, 0.0, +1.0)) for x in stops(Xs): X, Y, Z = [], [], [] for y in Ys: z = supportScalar({ax1: x, ax2: y}, support) X.append(x) Y.append(y) Z.append(z) axis3D.plot(X, Y, Z, color=color, **kwargs) for y in stops(Ys): X, Y, Z = [], [], [] for x in Xs: z = supportScalar({ax1: x, ax2: y}, support) X.append(x) Y.append(y) Z.append(z) axis3D.plot(X, Y, Z, color=color, **kwargs) _plotLocationsDots(model.locations, [ax1, ax2], axis3D) def plotDocument(doc, fig, **kwargs): doc.normalize() locations = [s.location for s in doc.sources] names = [s.name for s in doc.sources] plotLocations(locations, fig, names, **kwargs) def _plotModelFromMasters2D(model, masterValues, fig, **kwargs): assert len(model.axisOrder) == 1 axis = model.axisOrder[0] axis_min = min(loc.get(axis, 0) for loc in model.locations) axis_max = max(loc.get(axis, 0) for loc in model.locations) import numpy as np X = np.arange(axis_min, axis_max, (axis_max - axis_min) / 100) Y = [] for x in X: loc = {axis: x} v = model.interpolateFromMasters(loc, masterValues) Y.append(v) subplot = fig.add_subplot(111) subplot.plot(X, Y, "-", **kwargs) def _plotModelFromMasters3D(model, masterValues, fig, **kwargs): assert len(model.axisOrder) == 2 axis1, axis2 = model.axisOrder[0], model.axisOrder[1] axis1_min = min(loc.get(axis1, 0) for loc in 
model.locations) axis1_max = max(loc.get(axis1, 0) for loc in model.locations) axis2_min = min(loc.get(axis2, 0) for loc in model.locations) axis2_max = max(loc.get(axis2, 0) for loc in model.locations) import numpy as np X = np.arange(axis1_min, axis1_max, (axis1_max - axis1_min) / 100) Y = np.arange(axis2_min, axis2_max, (axis2_max - axis2_min) / 100) X, Y = np.meshgrid(X, Y) Z = [] for row_x, row_y in zip(X, Y): z_row = [] Z.append(z_row) for x, y in zip(row_x, row_y): loc = {axis1: x, axis2: y} v = model.interpolateFromMasters(loc, masterValues) z_row.append(v) Z = np.array(Z) axis3D = fig.add_subplot(111, projection="3d") axis3D.plot_surface(X, Y, Z, **kwargs) def plotModelFromMasters(model, masterValues, fig, **kwargs): """Plot a variation model and set of master values corresponding to the locations to the model into a pyplot figure. Variation model must have axisOrder of size 1 or 2.""" if len(model.axisOrder) == 1: _plotModelFromMasters2D(model, masterValues, fig, **kwargs) elif len(model.axisOrder) == 2: _plotModelFromMasters3D(model, masterValues, fig, **kwargs) else: raise ValueError("Only 1 or 2 axes are supported") def main(args=None): from fontTools import configLogger if args is None: args = sys.argv[1:] # configure the library logger (for >= WARNING) configLogger() # comment this out to enable debug messages from logger # log.setLevel(logging.DEBUG) if len(args) < 1: print("usage: fonttools varLib.plot source.designspace", file=sys.stderr) print(" or") print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr) print(" or") print( "usage: fonttools varLib.plot location1=value1 location2=value2 ...", file=sys.stderr, ) sys.exit(1) fig = pyplot.figure() fig.set_tight_layout(True) if len(args) == 1 and args[0].endswith(".designspace"): doc = DesignSpaceDocument() doc.read(args[0]) plotDocument(doc, fig) else: axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)] if "=" not in args[0]: locs = [dict(zip(axes, (float(v) for v in s.split(",")))) for s in args] plotLocations(locs, fig) else: locations = [] masterValues = [] for arg in args: loc, v = arg.split("=") locations.append(dict(zip(axes, (float(v) for v in loc.split(","))))) masterValues.append(float(v)) model = VariationModel(locations, axes[: len(locations[0])]) plotModelFromMasters(model, masterValues, fig) pyplot.show() if __name__ == "__main__": import sys sys.exit(main()) PKaZZZC���fontTools/varLib/stat.py"""Extra methods for DesignSpaceDocument to generate its STAT table data.""" from __future__ import annotations from typing import Dict, List, Union import fontTools.otlLib.builder from fontTools.designspaceLib import ( AxisLabelDescriptor, DesignSpaceDocument, DesignSpaceDocumentError, LocationLabelDescriptor, ) from fontTools.designspaceLib.types import Region, getVFUserRegion, locationInRegion from fontTools.ttLib import TTFont def buildVFStatTable(ttFont: TTFont, doc: DesignSpaceDocument, vfName: str) -> None: """Build the STAT table for the variable font identified by its name in the given document. Knowing which variable we're building STAT data for is needed to subset the STAT locations to only include what the variable font actually ships. .. versionadded:: 5.0 .. 
seealso:: - :func:`getStatAxes()` - :func:`getStatLocations()` - :func:`fontTools.otlLib.builder.buildStatTable()` """ for vf in doc.getVariableFonts(): if vf.name == vfName: break else: raise DesignSpaceDocumentError( f"Cannot find the variable font by name {vfName}" ) region = getVFUserRegion(doc, vf) return fontTools.otlLib.builder.buildStatTable( ttFont, getStatAxes(doc, region), getStatLocations(doc, region), doc.elidedFallbackName if doc.elidedFallbackName is not None else 2, ) def getStatAxes(doc: DesignSpaceDocument, userRegion: Region) -> List[Dict]: """Return a list of axis dicts suitable for use as the ``axes`` argument to :func:`fontTools.otlLib.builder.buildStatTable()`. .. versionadded:: 5.0 """ # First, get the axis labels with explicit ordering # then append the others in the order they appear. maxOrdering = max( (axis.axisOrdering for axis in doc.axes if axis.axisOrdering is not None), default=-1, ) axisOrderings = [] for axis in doc.axes: if axis.axisOrdering is not None: axisOrderings.append(axis.axisOrdering) else: maxOrdering += 1 axisOrderings.append(maxOrdering) return [ dict( tag=axis.tag, name={"en": axis.name, **axis.labelNames}, ordering=ordering, values=[ _axisLabelToStatLocation(label) for label in axis.axisLabels if locationInRegion({axis.name: label.userValue}, userRegion) ], ) for axis, ordering in zip(doc.axes, axisOrderings) ] def getStatLocations(doc: DesignSpaceDocument, userRegion: Region) -> List[Dict]: """Return a list of location dicts suitable for use as the ``locations`` argument to :func:`fontTools.otlLib.builder.buildStatTable()`. .. versionadded:: 5.0 """ axesByName = {axis.name: axis for axis in doc.axes} return [ dict( name={"en": label.name, **label.labelNames}, # Location in the designspace is keyed by axis name # Location in buildStatTable by axis tag location={ axesByName[name].tag: value for name, value in label.getFullUserLocation(doc).items() }, flags=_labelToFlags(label), ) for label in doc.locationLabels if locationInRegion(label.getFullUserLocation(doc), userRegion) ] def _labelToFlags(label: Union[AxisLabelDescriptor, LocationLabelDescriptor]) -> int: flags = 0 if label.olderSibling: flags |= 1 if label.elidable: flags |= 2 return flags def _axisLabelToStatLocation( label: AxisLabelDescriptor, ) -> Dict: label_format = label.getFormat() name = {"en": label.name, **label.labelNames} flags = _labelToFlags(label) if label_format == 1: return dict(name=name, value=label.userValue, flags=flags) if label_format == 3: return dict( name=name, value=label.userValue, linkedValue=label.linkedUserValue, flags=flags, ) if label_format == 2: res = dict( name=name, nominalValue=label.userValue, flags=flags, ) if label.userMinimum is not None: res["rangeMinValue"] = label.userMinimum if label.userMaximum is not None: res["rangeMaxValue"] = label.userMaximum return res raise NotImplementedError("Unknown STAT label format") PKaZZZ��b6�\�\fontTools/varLib/varStore.pyfrom fontTools.misc.roundTools import noRound, otRound from fontTools.misc.intTools import bit_count from fontTools.ttLib.tables import otTables as ot from fontTools.varLib.models import supportScalar from fontTools.varLib.builder import ( buildVarRegionList, buildVarStore, buildVarRegion, buildVarData, ) from functools import partial from collections import defaultdict from heapq import heappush, heappop NO_VARIATION_INDEX = ot.NO_VARIATION_INDEX ot.VarStore.NO_VARIATION_INDEX = NO_VARIATION_INDEX def _getLocationKey(loc): return tuple(sorted(loc.items(), key=lambda kv: kv[0])) class 
OnlineVarStoreBuilder(object): def __init__(self, axisTags): self._axisTags = axisTags self._regionMap = {} self._regionList = buildVarRegionList([], axisTags) self._store = buildVarStore(self._regionList, []) self._data = None self._model = None self._supports = None self._varDataIndices = {} self._varDataCaches = {} self._cache = {} def setModel(self, model): self.setSupports(model.supports) self._model = model def setSupports(self, supports): self._model = None self._supports = list(supports) if not self._supports[0]: del self._supports[0] # Drop base master support self._cache = {} self._data = None def finish(self, optimize=True): self._regionList.RegionCount = len(self._regionList.Region) self._store.VarDataCount = len(self._store.VarData) for data in self._store.VarData: data.ItemCount = len(data.Item) data.calculateNumShorts(optimize=optimize) return self._store def _add_VarData(self): regionMap = self._regionMap regionList = self._regionList regions = self._supports regionIndices = [] for region in regions: key = _getLocationKey(region) idx = regionMap.get(key) if idx is None: varRegion = buildVarRegion(region, self._axisTags) idx = regionMap[key] = len(regionList.Region) regionList.Region.append(varRegion) regionIndices.append(idx) # Check if we have one already... key = tuple(regionIndices) varDataIdx = self._varDataIndices.get(key) if varDataIdx is not None: self._outer = varDataIdx self._data = self._store.VarData[varDataIdx] self._cache = self._varDataCaches[key] if len(self._data.Item) == 0xFFFF: # This is full. Need new one. varDataIdx = None if varDataIdx is None: self._data = buildVarData(regionIndices, [], optimize=False) self._outer = len(self._store.VarData) self._store.VarData.append(self._data) self._varDataIndices[key] = self._outer if key not in self._varDataCaches: self._varDataCaches[key] = {} self._cache = self._varDataCaches[key] def storeMasters(self, master_values, *, round=round): deltas = self._model.getDeltas(master_values, round=round) base = deltas.pop(0) return base, self.storeDeltas(deltas, round=noRound) def storeDeltas(self, deltas, *, round=round): deltas = [round(d) for d in deltas] if len(deltas) == len(self._supports) + 1: deltas = tuple(deltas[1:]) else: assert len(deltas) == len(self._supports) deltas = tuple(deltas) varIdx = self._cache.get(deltas) if varIdx is not None: return varIdx if not self._data: self._add_VarData() inner = len(self._data.Item) if inner == 0xFFFF: # Full array. Start new one. 
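# (Hedged usage sketch of OnlineVarStoreBuilder, illustrative only:
#   >>> from fontTools.varLib.models import VariationModel
#   >>> builder = OnlineVarStoreBuilder(["wght"])
#   >>> builder.setModel(VariationModel([{}, {"wght": 1.0}]))
#   >>> builder.storeMasters([100, 180])  # -> (default value, packed VarIdx)
#   (100, 0)
# The +80 delta lands in VarData subtable 0, row 0; builder.finish() then
# returns the assembled ot.VarStore.)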
self._add_VarData() return self.storeDeltas(deltas) self._data.addItem(deltas, round=noRound) varIdx = (self._outer << 16) + inner self._cache[deltas] = varIdx return varIdx def VarData_addItem(self, deltas, *, round=round): deltas = [round(d) for d in deltas] countUs = self.VarRegionCount countThem = len(deltas) if countUs + 1 == countThem: deltas = list(deltas[1:]) else: assert countUs == countThem, (countUs, countThem) deltas = list(deltas) self.Item.append(deltas) self.ItemCount = len(self.Item) ot.VarData.addItem = VarData_addItem def VarRegion_get_support(self, fvar_axes): return { fvar_axes[i].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord) for i, reg in enumerate(self.VarRegionAxis) if reg.PeakCoord != 0 } ot.VarRegion.get_support = VarRegion_get_support def VarStore___bool__(self): return bool(self.VarData) ot.VarStore.__bool__ = VarStore___bool__ class VarStoreInstancer(object): def __init__(self, varstore, fvar_axes, location={}): self.fvar_axes = fvar_axes assert varstore is None or varstore.Format == 1 self._varData = varstore.VarData if varstore else [] self._regions = varstore.VarRegionList.Region if varstore else [] self.setLocation(location) def setLocation(self, location): self.location = dict(location) self._clearCaches() def _clearCaches(self): self._scalars = {} def _getScalar(self, regionIdx): scalar = self._scalars.get(regionIdx) if scalar is None: support = self._regions[regionIdx].get_support(self.fvar_axes) scalar = supportScalar(self.location, support) self._scalars[regionIdx] = scalar return scalar @staticmethod def interpolateFromDeltasAndScalars(deltas, scalars): delta = 0.0 for d, s in zip(deltas, scalars): if not s: continue delta += d * s return delta def __getitem__(self, varidx): major, minor = varidx >> 16, varidx & 0xFFFF if varidx == NO_VARIATION_INDEX: return 0.0 varData = self._varData scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex] deltas = varData[major].Item[minor] return self.interpolateFromDeltasAndScalars(deltas, scalars) def interpolateFromDeltas(self, varDataIndex, deltas): varData = self._varData scalars = [self._getScalar(ri) for ri in varData[varDataIndex].VarRegionIndex] return self.interpolateFromDeltasAndScalars(deltas, scalars) # # Optimizations # # retainFirstMap - If true, major 0 mappings are retained. Deltas for unused indices are zeroed # advIdxes - Set of major 0 indices for advance deltas to be listed first. Other major 0 indices follow. def VarStore_subset_varidxes( self, varIdxes, optimize=True, retainFirstMap=False, advIdxes=set() ): # Sort out used varIdxes by major/minor. 
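# (Hedged aside: a 32-bit VarIdx packs the VarData subtable index ("major")
# into the top 16 bits and the row within that subtable ("minor") into the
# low 16 bits, which is exactly how the loop below unpacks it.)
_example_varIdx = (2 << 16) + 5  # illustrative value only
assert _example_varIdx >> 16 == 2  # major: which VarData subtable
assert _example_varIdx & 0xFFFF == 5  # minor: which row inside that subtable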
used = {} for varIdx in varIdxes: if varIdx == NO_VARIATION_INDEX: continue major = varIdx >> 16 minor = varIdx & 0xFFFF d = used.get(major) if d is None: d = used[major] = set() d.add(minor) del varIdxes # # Subset VarData # varData = self.VarData newVarData = [] varDataMap = {NO_VARIATION_INDEX: NO_VARIATION_INDEX} for major, data in enumerate(varData): usedMinors = used.get(major) if usedMinors is None: continue newMajor = len(newVarData) newVarData.append(data) items = data.Item newItems = [] if major == 0 and retainFirstMap: for minor in range(len(items)): newItems.append( items[minor] if minor in usedMinors else [0] * len(items[minor]) ) varDataMap[minor] = minor else: if major == 0: minors = sorted(advIdxes) + sorted(usedMinors - advIdxes) else: minors = sorted(usedMinors) for minor in minors: newMinor = len(newItems) newItems.append(items[minor]) varDataMap[(major << 16) + minor] = (newMajor << 16) + newMinor data.Item = newItems data.ItemCount = len(data.Item) data.calculateNumShorts(optimize=optimize) self.VarData = newVarData self.VarDataCount = len(self.VarData) self.prune_regions() return varDataMap ot.VarStore.subset_varidxes = VarStore_subset_varidxes def VarStore_prune_regions(self): """Remove unused VarRegions.""" # # Subset VarRegionList # # Collect. usedRegions = set() for data in self.VarData: usedRegions.update(data.VarRegionIndex) # Subset. regionList = self.VarRegionList regions = regionList.Region newRegions = [] regionMap = {} for i in sorted(usedRegions): regionMap[i] = len(newRegions) newRegions.append(regions[i]) regionList.Region = newRegions regionList.RegionCount = len(regionList.Region) # Map. for data in self.VarData: data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex] ot.VarStore.prune_regions = VarStore_prune_regions def _visit(self, func): """Recurse down from self, if type of an object is ot.Device, call func() on it. 
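For example (illustrative): _visit(gposTable, lambda dev: print(dev.DeltaFormat))
would invoke the callback on every Device table reachable from a GPOS table.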
Works on otData-style classes.""" if type(self) == ot.Device: func(self) elif isinstance(self, list): for that in self: _visit(that, func) elif hasattr(self, "getConverters") and not hasattr(self, "postRead"): for conv in self.getConverters(): that = getattr(self, conv.name, None) if that is not None: _visit(that, func) elif isinstance(self, ot.ValueRecord): for that in self.__dict__.values(): _visit(that, func) def _Device_recordVarIdx(self, s): """Add VarIdx in this Device table (if any) to the set s.""" if self.DeltaFormat == 0x8000: s.add((self.StartSize << 16) + self.EndSize) def Object_collect_device_varidxes(self, varidxes): adder = partial(_Device_recordVarIdx, s=varidxes) _visit(self, adder) ot.GDEF.collect_device_varidxes = Object_collect_device_varidxes ot.GPOS.collect_device_varidxes = Object_collect_device_varidxes def _Device_mapVarIdx(self, mapping, done): """Map VarIdx in this Device table (if any) through mapping.""" if id(self) in done: return done.add(id(self)) if self.DeltaFormat == 0x8000: varIdx = mapping[(self.StartSize << 16) + self.EndSize] self.StartSize = varIdx >> 16 self.EndSize = varIdx & 0xFFFF def Object_remap_device_varidxes(self, varidxes_map): mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set()) _visit(self, mapper) ot.GDEF.remap_device_varidxes = Object_remap_device_varidxes ot.GPOS.remap_device_varidxes = Object_remap_device_varidxes class _Encoding(object): def __init__(self, chars): self.chars = chars self.width = bit_count(chars) self.columns = self._columns(chars) self.overhead = self._characteristic_overhead(self.columns) self.items = set() def append(self, row): self.items.add(row) def extend(self, lst): self.items.update(lst) def get_room(self): """Maximum number of bytes that can be added to characteristic while still being beneficial to merge it into another one.""" count = len(self.items) return max(0, (self.overhead - 1) // count - self.width) room = property(get_room) def get_gain(self): """Maximum possible byte gain from merging this into another characteristic.""" count = len(self.items) return max(0, self.overhead - count) gain = property(get_gain) def gain_sort_key(self): return self.gain, self.chars def width_sort_key(self): return self.width, self.chars @staticmethod def _characteristic_overhead(columns): """Returns overhead in bytes of encoding this characteristic as a VarData.""" c = 4 + 6 # 4 bytes for LOffset, 6 bytes for VarData header c += bit_count(columns) * 2 return c @staticmethod def _columns(chars): cols = 0 i = 1 while chars: if chars & 0b1111: cols |= i chars >>= 4 i <<= 1 return cols def gain_from_merging(self, other_encoding): combined_chars = other_encoding.chars | self.chars combined_width = bit_count(combined_chars) combined_columns = self.columns | other_encoding.columns combined_overhead = _Encoding._characteristic_overhead(combined_columns) combined_gain = ( +self.overhead + other_encoding.overhead - combined_overhead - (combined_width - self.width) * len(self.items) - (combined_width - other_encoding.width) * len(other_encoding.items) ) return combined_gain class _EncodingDict(dict): def __missing__(self, chars): r = self[chars] = _Encoding(chars) return r def add_row(self, row): chars = self._row_characteristics(row) self[chars].append(row) @staticmethod def _row_characteristics(row): """Returns encoding characteristics for a row.""" longWords = False chars = 0 i = 1 for v in row: if v: chars += i if not (-128 <= v <= 127): chars += i * 0b0010 if not (-32768 <= v <= 32767): longWords = True 
break i <<= 4 if longWords: # Redo; only allow 2byte/4byte encoding chars = 0 i = 1 for v in row: if v: chars += i * 0b0011 if not (-32768 <= v <= 32767): chars += i * 0b1100 i <<= 4 return chars def VarStore_optimize(self, use_NO_VARIATION_INDEX=True, quantization=1): """Optimize storage. Returns mapping from old VarIdxes to new ones.""" # Overview: # # For each VarData row, we first extend it with zeroes to have # one column per region in VarRegionList. We then group the # rows into _Encoding objects, by their "characteristic" bitmap. # The characteristic bitmap is a binary number representing how # many bytes each column of the data takes up to encode. Each # column is encoded in four bits. For example, if a column has # only values in the range -128..127, it would only have a single # bit set in the characteristic bitmap for that column. If it has # values in the range -32768..32767, it would have two bits set. # The number of ones in the characteristic bitmap is the "width" # of the encoding. # # Each encoding as such has a number of "active" (ie. non-zero) # columns. The overhead of encoding the characteristic bitmap # is 10 bytes, plus 2 bytes per active column. # # When an encoding is merged into another one, if the characteristic # of the old encoding is a subset of the new one, then the overhead # of the old encoding is completely eliminated. However, each row # now would require more bytes to encode, to the tune of one byte # per characteristic bit that is active in the new encoding but not # in the old one. The number of bits that can be added to an encoding # while still beneficial to merge it into another encoding is called # the "room" for that encoding. # # The "gain" of an encodings is the maximum number of bytes we can # save by merging it into another encoding. The "gain" of merging # two encodings is how many bytes we save by doing so. # # High-level algorithm: # # - Each encoding has a minimal way to encode it. However, because # of the overhead of encoding the characteristic bitmap, it may # be beneficial to merge two encodings together, if there is # gain in doing so. As such, we need to search for the best # such successive merges. # # Algorithm: # # - Put all encodings into a "todo" list. # # - Sort todo list by decreasing gain (for stability). # # - Make a priority-queue of the gain from combining each two # encodings in the todo list. The priority queue is sorted by # decreasing gain. Only positive gains are included. # # - While priority queue is not empty: # - Pop the first item from the priority queue, # - Merge the two encodings it represents, # - Remove the two encodings from the todo list, # - Insert positive gains from combining the new encoding with # all existing todo list items into the priority queue, # - If a todo list item with the same characteristic bitmap as # the new encoding exists, remove it from the todo list and # merge it into the new encoding. # - Insert the new encoding into the todo list, # # - Encode all remaining items in the todo list. # # The output is then sorted for stability, in the following way: # - The VarRegionList of the input is kept intact. # - All encodings are sorted before the main algorithm, by # gain_key_sort(), which is a tuple of the following items: # * The gain of the encoding. # * The characteristic bitmap of the encoding, with higher-numbered # columns compared first. # - The VarData is sorted by width_sort_key(), which is a tuple # of the following items: # * The "width" of the encoding. 
# * The characteristic bitmap of the encoding, with higher-numbered # columns compared first. # - Within each VarData, the items are sorted as vectors of numbers. # # Finally, each VarData is optimized to remove the empty columns and # reorder columns as needed. # TODO # Check that no two VarRegions are the same; if they are, fold them. n = len(self.VarRegionList.Region) # Number of columns zeroes = [0] * n front_mapping = {} # Map from old VarIdxes to full row tuples encodings = _EncodingDict() # Collect all items into a set of full rows (with lots of zeroes.) for major, data in enumerate(self.VarData): regionIndices = data.VarRegionIndex for minor, item in enumerate(data.Item): row = list(zeroes) if quantization == 1: for regionIdx, v in zip(regionIndices, item): row[regionIdx] += v else: for regionIdx, v in zip(regionIndices, item): row[regionIdx] += ( round(v / quantization) * quantization ) # TODO https://github.com/fonttools/fonttools/pull/3126#discussion_r1205439785 row = tuple(row) if use_NO_VARIATION_INDEX and not any(row): front_mapping[(major << 16) + minor] = None continue encodings.add_row(row) front_mapping[(major << 16) + minor] = row # Prepare for the main algorithm. todo = sorted(encodings.values(), key=_Encoding.gain_sort_key) del encodings # Repeatedly pick two best encodings to combine, and combine them. heap = [] for i, encoding in enumerate(todo): for j in range(i + 1, len(todo)): other_encoding = todo[j] combining_gain = encoding.gain_from_merging(other_encoding) if combining_gain > 0: heappush(heap, (-combining_gain, i, j)) while heap: _, i, j = heappop(heap) if todo[i] is None or todo[j] is None: continue encoding, other_encoding = todo[i], todo[j] todo[i], todo[j] = None, None # Combine the two encodings combined_chars = other_encoding.chars | encoding.chars combined_encoding = _Encoding(combined_chars) combined_encoding.extend(encoding.items) combined_encoding.extend(other_encoding.items) for k, enc in enumerate(todo): if enc is None: continue # In the unlikely event that the same encoding exists already, # combine it. if enc.chars == combined_chars: combined_encoding.extend(enc.items) todo[k] = None continue combining_gain = combined_encoding.gain_from_merging(enc) if combining_gain > 0: heappush(heap, (-combining_gain, k, len(todo))) todo.append(combined_encoding) encodings = [encoding for encoding in todo if encoding is not None] # Assemble final store. back_mapping = {} # Mapping from full rows to new VarIdxes encodings.sort(key=_Encoding.width_sort_key) self.VarData = [] for encoding in encodings: items = sorted(encoding.items) while items: major = len(self.VarData) data = ot.VarData() self.VarData.append(data) data.VarRegionIndex = range(n) data.VarRegionCount = len(data.VarRegionIndex) # Each major can only encode up to 0xFFFF entries. data.Item, items = items[:0xFFFF], items[0xFFFF:] for minor, item in enumerate(data.Item): back_mapping[item] = (major << 16) + minor # Compile final mapping. varidx_map = {NO_VARIATION_INDEX: NO_VARIATION_INDEX} for k, v in front_mapping.items(): varidx_map[k] = back_mapping[v] if v is not None else NO_VARIATION_INDEX # Recalculate things and go home. self.VarRegionList.RegionCount = len(self.VarRegionList.Region) self.VarDataCount = len(self.VarData) for data in self.VarData: data.ItemCount = len(data.Item) data.optimize() # Remove unused regions. 
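# (Hedged aside on the characteristic bitmap described above, illustrative
# only: each column occupies one four-bit nibble, so a byte-sized column
# contributes 0b0001 and a short-sized column 0b0011 at its nibble position.)
assert _EncodingDict._row_characteristics((1, 0)) == 0b0001  # one byte-wide column
assert _EncodingDict._row_characteristics((300, 0)) == 0b0011  # one short-wide column
assert _EncodingDict._row_characteristics((0, 300)) == 0b0011_0000  # same, second nibble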
self.prune_regions() return varidx_map ot.VarStore.optimize = VarStore_optimize def main(args=None): """Optimize a font's GDEF variation store""" from argparse import ArgumentParser from fontTools import configLogger from fontTools.ttLib import TTFont from fontTools.ttLib.tables.otBase import OTTableWriter parser = ArgumentParser(prog="varLib.varStore", description=main.__doc__) parser.add_argument("--quantization", type=int, default=1) parser.add_argument("fontfile") parser.add_argument("outfile", nargs="?") options = parser.parse_args(args) # TODO: allow user to configure logging via command-line options configLogger(level="INFO") quantization = options.quantization fontfile = options.fontfile outfile = options.outfile font = TTFont(fontfile) gdef = font["GDEF"] store = gdef.table.VarStore writer = OTTableWriter() store.compile(writer, font) size = len(writer.getAllData()) print("Before: %7d bytes" % size) varidx_map = store.optimize(quantization=quantization) writer = OTTableWriter() store.compile(writer, font) size = len(writer.getAllData()) print("After: %7d bytes" % size) if outfile is not None: gdef.table.remap_device_varidxes(varidx_map) if "GPOS" in font: font["GPOS"].table.remap_device_varidxes(varidx_map) font.save(outfile) if __name__ == "__main__": import sys if len(sys.argv) > 1: sys.exit(main()) import doctest sys.exit(doctest.testmod().failed) PKaZZZ��Cg����&fontTools/varLib/instancer/__init__.py""" Partially instantiate a variable font. The module exports an `instantiateVariableFont` function and CLI that allow to create full instances (i.e. static fonts) from variable fonts, as well as "partial" variable fonts that only contain a subset of the original variation space. For example, if you wish to pin the width axis to a given location while also restricting the weight axis to 400..700 range, you can do:: $ fonttools varLib.instancer ./NotoSans-VF.ttf wdth=85 wght=400:700 See `fonttools varLib.instancer --help` for more info on the CLI options. The module's entry point is the `instantiateVariableFont` function, which takes a TTFont object and a dict specifying either axis coodinates or (min, max) ranges, and returns a new TTFont representing either a partial VF, or full instance if all the VF axes were given an explicit coordinate. E.g. here's how to pin the wght axis at a given location in a wght+wdth variable font, keeping only the deltas associated with the wdth axis:: | >>> from fontTools import ttLib | >>> from fontTools.varLib import instancer | >>> varfont = ttLib.TTFont("path/to/MyVariableFont.ttf") | >>> [a.axisTag for a in varfont["fvar"].axes] # the varfont's current axes | ['wght', 'wdth'] | >>> partial = instancer.instantiateVariableFont(varfont, {"wght": 300}) | >>> [a.axisTag for a in partial["fvar"].axes] # axes left after pinning 'wght' | ['wdth'] If the input location specifies all the axes, the resulting instance is no longer 'variable' (same as using fontools varLib.mutator): | >>> instance = instancer.instantiateVariableFont( | ... varfont, {"wght": 700, "wdth": 67.5} | ... ) | >>> "fvar" not in instance | True If one just want to drop an axis at the default location, without knowing in advance what the default value for that axis is, one can pass a `None` value: | >>> instance = instancer.instantiateVariableFont(varfont, {"wght": None}) | >>> len(varfont["fvar"].axes) | 1 From the console script, this is equivalent to passing `wght=drop` as input. This module is similar to fontTools.varLib.mutator, which it's intended to supersede. 
Note that, unlike varLib.mutator, when an axis is not mentioned in the input location, the varLib.instancer will keep the axis and the corresponding deltas, whereas mutator implicitly drops the axis at its default coordinate. The module supports all the following "levels" of instancing, which can of course be combined: L1 dropping one or more axes while leaving the default tables unmodified; | >>> font = instancer.instantiateVariableFont(varfont, {"wght": None}) L2 dropping one or more axes while pinning them at non-default locations; | >>> font = instancer.instantiateVariableFont(varfont, {"wght": 700}) L3 restricting the range of variation of one or more axes, by setting either a new minimum or maximum, potentially -- though not necessarily -- dropping entire regions of variations that fall completely outside this new range. | >>> font = instancer.instantiateVariableFont(varfont, {"wght": (100, 300)}) L4 moving the default location of an axis, by specifying (min,defalt,max) values: | >>> font = instancer.instantiateVariableFont(varfont, {"wght": (100, 300, 700)}) Currently only TrueType-flavored variable fonts (i.e. containing 'glyf' table) are supported, but support for CFF2 variable fonts will be added soon. The discussion and implementation of these features are tracked at https://github.com/fonttools/fonttools/issues/1537 """ from fontTools.misc.fixedTools import ( floatToFixedToFloat, strToFixedToFloat, otRound, ) from fontTools.varLib.models import normalizeValue, piecewiseLinearMap from fontTools.ttLib import TTFont from fontTools.ttLib.tables.TupleVariation import TupleVariation from fontTools.ttLib.tables import _g_l_y_f from fontTools import varLib # we import the `subset` module because we use the `prune_lookups` method on the GSUB # table class, and that method is only defined dynamically upon importing `subset` from fontTools import subset # noqa: F401 from fontTools.varLib import builder from fontTools.varLib.mvar import MVAR_ENTRIES from fontTools.varLib.merger import MutatorMerger from fontTools.varLib.instancer import names from .featureVars import instantiateFeatureVariations from fontTools.misc.cliTools import makeOutputFileName from fontTools.varLib.instancer import solver import collections import dataclasses from contextlib import contextmanager from copy import deepcopy from enum import IntEnum import logging import os import re from typing import Dict, Iterable, Mapping, Optional, Sequence, Tuple, Union import warnings log = logging.getLogger("fontTools.varLib.instancer") def AxisRange(minimum, maximum): warnings.warn( "AxisRange is deprecated; use AxisTriple instead", DeprecationWarning, stacklevel=2, ) return AxisTriple(minimum, None, maximum) def NormalizedAxisRange(minimum, maximum): warnings.warn( "NormalizedAxisRange is deprecated; use AxisTriple instead", DeprecationWarning, stacklevel=2, ) return NormalizedAxisTriple(minimum, None, maximum) @dataclasses.dataclass(frozen=True, order=True, repr=False) class AxisTriple(Sequence): """A triple of (min, default, max) axis values. Any of the values can be None, in which case the limitRangeAndPopulateDefaults() method can be used to fill in the missing values based on the fvar axis values. 
""" minimum: Optional[float] default: Optional[float] maximum: Optional[float] def __post_init__(self): if self.default is None and self.minimum == self.maximum: object.__setattr__(self, "default", self.minimum) if ( ( self.minimum is not None and self.default is not None and self.minimum > self.default ) or ( self.default is not None and self.maximum is not None and self.default > self.maximum ) or ( self.minimum is not None and self.maximum is not None and self.minimum > self.maximum ) ): raise ValueError( f"{type(self).__name__} minimum ({self.minimum}), default ({self.default}), maximum ({self.maximum}) must be in sorted order" ) def __getitem__(self, i): fields = dataclasses.fields(self) return getattr(self, fields[i].name) def __len__(self): return len(dataclasses.fields(self)) def _replace(self, **kwargs): return dataclasses.replace(self, **kwargs) def __repr__(self): return ( f"({', '.join(format(v, 'g') if v is not None else 'None' for v in self)})" ) @classmethod def expand( cls, v: Union[ "AxisTriple", float, # pin axis at single value, same as min==default==max Tuple[float, float], # (min, max), restrict axis and keep default Tuple[float, float, float], # (min, default, max) ], ) -> "AxisTriple": """Convert a single value or a tuple into an AxisTriple. If the input is a single value, it is interpreted as a pin at that value. If the input is a tuple, it is interpreted as (min, max) or (min, default, max). """ if isinstance(v, cls): return v if isinstance(v, (int, float)): return cls(v, v, v) try: n = len(v) except TypeError as e: raise ValueError( f"expected float, 2- or 3-tuple of floats; got {type(v)}: {v!r}" ) from e default = None if n == 2: minimum, maximum = v elif n >= 3: return cls(*v) else: raise ValueError(f"expected sequence of 2 or 3; got {n}: {v!r}") return cls(minimum, default, maximum) def limitRangeAndPopulateDefaults(self, fvarTriple) -> "AxisTriple": """Return a new AxisTriple with the default value filled in. Set default to fvar axis default if the latter is within the min/max range, otherwise set default to the min or max value, whichever is closer to the fvar axis default. If the default value is already set, return self. 
""" minimum = self.minimum if minimum is None: minimum = fvarTriple[0] default = self.default if default is None: default = fvarTriple[1] maximum = self.maximum if maximum is None: maximum = fvarTriple[2] minimum = max(minimum, fvarTriple[0]) maximum = max(maximum, fvarTriple[0]) minimum = min(minimum, fvarTriple[2]) maximum = min(maximum, fvarTriple[2]) default = max(minimum, min(maximum, default)) return AxisTriple(minimum, default, maximum) @dataclasses.dataclass(frozen=True, order=True, repr=False) class NormalizedAxisTriple(AxisTriple): """A triple of (min, default, max) normalized axis values.""" minimum: float default: float maximum: float def __post_init__(self): if self.default is None: object.__setattr__(self, "default", max(self.minimum, min(self.maximum, 0))) if not (-1.0 <= self.minimum <= self.default <= self.maximum <= 1.0): raise ValueError( "Normalized axis values not in -1..+1 range; got " f"minimum={self.minimum:g}, default={self.default:g}, maximum={self.maximum:g})" ) @dataclasses.dataclass(frozen=True, order=True, repr=False) class NormalizedAxisTripleAndDistances(AxisTriple): """A triple of (min, default, max) normalized axis values, with distances between min and default, and default and max, in the *pre-normalized* space.""" minimum: float default: float maximum: float distanceNegative: Optional[float] = 1 distancePositive: Optional[float] = 1 def __post_init__(self): if self.default is None: object.__setattr__(self, "default", max(self.minimum, min(self.maximum, 0))) if not (-1.0 <= self.minimum <= self.default <= self.maximum <= 1.0): raise ValueError( "Normalized axis values not in -1..+1 range; got " f"minimum={self.minimum:g}, default={self.default:g}, maximum={self.maximum:g})" ) def reverse_negate(self): v = self return self.__class__(-v[2], -v[1], -v[0], v[4], v[3]) def renormalizeValue(self, v, extrapolate=True): """Renormalizes a normalized value v to the range of this axis, considering the pre-normalized distances as well as the new axis limits.""" lower, default, upper, distanceNegative, distancePositive = self assert lower <= default <= upper if not extrapolate: v = max(lower, min(upper, v)) if v == default: return 0 if default < 0: return -self.reverse_negate().renormalizeValue(-v, extrapolate=extrapolate) # default >= 0 and v != default if v > default: return (v - default) / (upper - default) # v < default if lower >= 0: return (v - default) / (default - lower) # lower < 0 and v < default totalDistance = distanceNegative * -lower + distancePositive * default if v >= 0: vDistance = (default - v) * distancePositive else: vDistance = -v * distanceNegative + distancePositive * default return -vDistance / totalDistance class _BaseAxisLimits(Mapping[str, AxisTriple]): def __getitem__(self, key: str) -> AxisTriple: return self._data[key] def __iter__(self) -> Iterable[str]: return iter(self._data) def __len__(self) -> int: return len(self._data) def __repr__(self) -> str: return f"{type(self).__name__}({self._data!r})" def __str__(self) -> str: return str(self._data) def defaultLocation(self) -> Dict[str, float]: """Return a dict of default axis values.""" return {k: v.default for k, v in self.items()} def pinnedLocation(self) -> Dict[str, float]: """Return a location dict with only the pinned axes.""" return {k: v.default for k, v in self.items() if v.minimum == v.maximum} class AxisLimits(_BaseAxisLimits): """Maps axis tags (str) to AxisTriple values.""" def __init__(self, *args, **kwargs): self._data = data = {} for k, v in dict(*args, **kwargs).items(): 
if v is None: # will be filled in by limitAxesAndPopulateDefaults data[k] = v else: try: triple = AxisTriple.expand(v) except ValueError as e: raise ValueError(f"Invalid axis limits for {k!r}: {v!r}") from e data[k] = triple def limitAxesAndPopulateDefaults(self, varfont) -> "AxisLimits": """Return a new AxisLimits with defaults filled in from fvar table. If all axis limits already have defaults, return self. """ fvar = varfont["fvar"] fvarTriples = { a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes } newLimits = {} for axisTag, triple in self.items(): fvarTriple = fvarTriples[axisTag] default = fvarTriple[1] if triple is None: newLimits[axisTag] = AxisTriple(default, default, default) else: newLimits[axisTag] = triple.limitRangeAndPopulateDefaults(fvarTriple) return type(self)(newLimits) def normalize(self, varfont, usingAvar=True) -> "NormalizedAxisLimits": """Return a new NormalizedAxisLimits with normalized -1..0..+1 values. If usingAvar is True, the avar table is used to warp the default normalization. """ fvar = varfont["fvar"] badLimits = set(self.keys()).difference(a.axisTag for a in fvar.axes) if badLimits: raise ValueError("Cannot limit: {} not present in fvar".format(badLimits)) axes = { a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes if a.axisTag in self } avarSegments = {} if usingAvar and "avar" in varfont: avarSegments = varfont["avar"].segments normalizedLimits = {} for axis_tag, triple in axes.items(): distanceNegative = triple[1] - triple[0] distancePositive = triple[2] - triple[1] if self[axis_tag] is None: normalizedLimits[axis_tag] = NormalizedAxisTripleAndDistances( 0, 0, 0, distanceNegative, distancePositive ) continue minV, defaultV, maxV = self[axis_tag] if defaultV is None: defaultV = triple[1] avarMapping = avarSegments.get(axis_tag, None) normalizedLimits[axis_tag] = NormalizedAxisTripleAndDistances( *(normalize(v, triple, avarMapping) for v in (minV, defaultV, maxV)), distanceNegative, distancePositive, ) return NormalizedAxisLimits(normalizedLimits) class NormalizedAxisLimits(_BaseAxisLimits): """Maps axis tags (str) to NormalizedAxisTriple values.""" def __init__(self, *args, **kwargs): self._data = data = {} for k, v in dict(*args, **kwargs).items(): try: triple = NormalizedAxisTripleAndDistances.expand(v) except ValueError as e: raise ValueError(f"Invalid axis limits for {k!r}: {v!r}") from e data[k] = triple class OverlapMode(IntEnum): KEEP_AND_DONT_SET_FLAGS = 0 KEEP_AND_SET_FLAGS = 1 REMOVE = 2 REMOVE_AND_IGNORE_ERRORS = 3 def instantiateTupleVariationStore( variations, axisLimits, origCoords=None, endPts=None ): """Instantiate TupleVariation list at the given location, or limit axes' min/max. The 'variations' list of TupleVariation objects is modified in-place. The 'axisLimits' (dict) maps axis tags (str) to NormalizedAxisTriple namedtuples specifying (minimum, default, maximum) in the -1,0,+1 normalized space. Pinned axes have minimum == default == maximum. A 'full' instance (i.e. static font) is produced when all the axes are pinned to single coordinates; a 'partial' instance (i.e. a less variable font) is produced when some of the axes are omitted, or restricted with a new range. Tuples that do not participate are kept as they are. Those that have 0 influence at the given location are removed from the variation store. Those that are fully instantiated (i.e. 
    all their axes are being pinned) are also removed from the variation store, their
    scaled deltas accumulated and returned, so that they can be added by the caller
    to the default instance's coordinates.
    Tuples that are only partially instantiated (i.e. not all the axes that they
    participate in are being pinned) are kept in the store, and their deltas
    multiplied by the scalar support of the axes to be pinned at the desired location.

    Args:
        variations: List[TupleVariation] from either 'gvar' or 'cvar'.
        axisLimits: NormalizedAxisLimits: map from axis tags to (min, default, max)
            normalized coordinates for the full or partial instance.
        origCoords: GlyphCoordinates: default instance's coordinates for computing
            'gvar' inferred points (cf. table__g_l_y_f._getCoordinatesAndControls).
        endPts: List[int]: indices of contour end points, for inferring 'gvar' deltas.

    Returns:
        List[float]: the overall delta adjustment after applicable deltas were summed.
    """
    newVariations = changeTupleVariationsAxisLimits(variations, axisLimits)

    mergedVariations = collections.OrderedDict()
    for var in newVariations:
        # compute inferred deltas only for gvar ('origCoords' is None for cvar)
        if origCoords is not None:
            var.calcInferredDeltas(origCoords, endPts)

        # merge TupleVariations with overlapping "tents"
        axes = frozenset(var.axes.items())
        if axes in mergedVariations:
            mergedVariations[axes] += var
        else:
            mergedVariations[axes] = var

    # drop TupleVariation if all axes have been pinned (var.axes.items() is empty);
    # its deltas will be added to the default instance's coordinates
    defaultVar = mergedVariations.pop(frozenset(), None)

    for var in mergedVariations.values():
        var.roundDeltas()
    variations[:] = list(mergedVariations.values())

    return defaultVar.coordinates if defaultVar is not None else []


def changeTupleVariationsAxisLimits(variations, axisLimits):
    for axisTag, axisLimit in sorted(axisLimits.items()):
        newVariations = []
        for var in variations:
            newVariations.extend(changeTupleVariationAxisLimit(var, axisTag, axisLimit))
        variations = newVariations
    return variations


def changeTupleVariationAxisLimit(var, axisTag, axisLimit):
    assert isinstance(axisLimit, NormalizedAxisTripleAndDistances)

    # Skip when current axis is missing or peaks at 0 (i.e.
    # doesn't participate)
    lower, peak, upper = var.axes.get(axisTag, (-1, 0, 1))
    if peak == 0:
        # explicitly defined, no-op axes can be omitted
        # https://github.com/fonttools/fonttools/issues/3453
        if axisTag in var.axes:
            del var.axes[axisTag]
        return [var]

    # Drop if the var 'tent' isn't well-formed
    if not (lower <= peak <= upper) or (lower < 0 and upper > 0):
        return []

    if axisTag not in var.axes:
        return [var]

    tent = var.axes[axisTag]

    solutions = solver.rebaseTent(tent, axisLimit)

    out = []
    for scalar, tent in solutions:
        newVar = (
            TupleVariation(var.axes, var.coordinates) if len(solutions) > 1 else var
        )
        if tent is None:
            newVar.axes.pop(axisTag)
        else:
            assert tent[1] != 0, tent
            newVar.axes[axisTag] = tent
        newVar *= scalar
        out.append(newVar)

    return out


def _instantiateGvarGlyph(
    glyphname, glyf, gvar, hMetrics, vMetrics, axisLimits, optimize=True
):
    coordinates, ctrl = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
    endPts = ctrl.endPts

    # Not every glyph may have variations
    tupleVarStore = gvar.variations.get(glyphname)

    if tupleVarStore:
        defaultDeltas = instantiateTupleVariationStore(
            tupleVarStore, axisLimits, coordinates, endPts
        )

        if defaultDeltas:
            coordinates += _g_l_y_f.GlyphCoordinates(defaultDeltas)

    glyph = glyf[glyphname]
    if glyph.isVarComposite():
        for component in glyph.components:
            newLocation = {}
            for tag, loc in component.location.items():
                if tag not in axisLimits:
                    newLocation[tag] = loc
                    continue
                if component.flags & _g_l_y_f.VarComponentFlags.AXES_HAVE_VARIATION:
                    raise NotImplementedError(
                        "Instancing across VarComposite axes with variation is not supported."
                    )
                limits = axisLimits[tag]
                loc = limits.renormalizeValue(loc, extrapolate=False)
                newLocation[tag] = loc
            component.location = newLocation

    # _setCoordinates also sets the hmtx/vmtx advance widths and sidebearings from
    # the four phantom points and glyph bounding boxes.
    # We call it unconditionally even if a glyph has no variations or no deltas are
    # applied at this location, in case the glyph's xMin and in turn its sidebearing
    # have changed. E.g. a composite glyph has no deltas for the component's (x, y)
    # offset nor for the 4 phantom points (e.g. it's monospaced). Thus its entry in
    # gvar table is empty; however, the composite's base glyph may have deltas
    # applied, hence the composite's bbox and left/top sidebearings may need updating
    # in the instanced font.
    glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)

    if not tupleVarStore:
        if glyphname in gvar.variations:
            del gvar.variations[glyphname]
        return

    if optimize:
        isComposite = glyf[glyphname].isComposite()
        for var in tupleVarStore:
            var.optimize(coordinates, endPts, isComposite=isComposite)


def instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=True):
    """Remove? https://github.com/fonttools/fonttools/pull/2266"""
    gvar = varfont["gvar"]
    glyf = varfont["glyf"]
    hMetrics = varfont["hmtx"].metrics
    vMetrics = getattr(varfont.get("vmtx"), "metrics", None)
    _instantiateGvarGlyph(
        glyphname, glyf, gvar, hMetrics, vMetrics, axisLimits, optimize=optimize
    )


def instantiateGvar(varfont, axisLimits, optimize=True):
    log.info("Instantiating glyf/gvar tables")

    gvar = varfont["gvar"]
    glyf = varfont["glyf"]
    hMetrics = varfont["hmtx"].metrics
    vMetrics = getattr(varfont.get("vmtx"), "metrics", None)
    # Get list of glyph names sorted by component depth.
    # If a composite glyph is processed before its base glyph, the bounds may
    # be calculated incorrectly because deltas haven't been applied to the
    # base glyph yet.
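    # (Illustrative aside, not in the original: with a simple glyph "A", a
    # composite "B" that references "A", and a composite "C" that references
    # "B", the sort below visits ["A", "B", "C"], since their depth keys are
    # 0, 1 and 2 respectively; ties are broken by glyph name.)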
    glyphnames = sorted(
        glyf.glyphOrder,
        key=lambda name: (
            (
                glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
                if glyf[name].isComposite() or glyf[name].isVarComposite()
                else 0
            ),
            name,
        ),
    )
    for glyphname in glyphnames:
        _instantiateGvarGlyph(
            glyphname, glyf, gvar, hMetrics, vMetrics, axisLimits, optimize=optimize
        )

    if not gvar.variations:
        del varfont["gvar"]


def setCvarDeltas(cvt, deltas):
    for i, delta in enumerate(deltas):
        if delta:
            cvt[i] += otRound(delta)


def instantiateCvar(varfont, axisLimits):
    log.info("Instantiating cvt/cvar tables")

    cvar = varfont["cvar"]

    defaultDeltas = instantiateTupleVariationStore(cvar.variations, axisLimits)

    if defaultDeltas:
        setCvarDeltas(varfont["cvt "], defaultDeltas)

    if not cvar.variations:
        del varfont["cvar"]


def setMvarDeltas(varfont, deltas):
    mvar = varfont["MVAR"].table
    records = mvar.ValueRecord
    for rec in records:
        mvarTag = rec.ValueTag
        if mvarTag not in MVAR_ENTRIES:
            continue
        tableTag, itemName = MVAR_ENTRIES[mvarTag]
        delta = deltas[rec.VarIdx]
        if delta != 0:
            setattr(
                varfont[tableTag],
                itemName,
                getattr(varfont[tableTag], itemName) + otRound(delta),
            )


@contextmanager
def verticalMetricsKeptInSync(varfont):
    """Ensure hhea vertical metrics stay in sync with OS/2 ones after instancing.

    When applying MVAR deltas to the OS/2 table, if the ascender, descender and
    line gap change but they were the same as the respective hhea metrics in the
    original font, this context manager ensures that hhea metrics also get updated
    accordingly.
    The MVAR spec only has tags for the OS/2 metrics, but it is common in fonts
    to have the hhea metrics be equal to those for compat reasons.

    https://learn.microsoft.com/en-us/typography/opentype/spec/mvar
    https://googlefonts.github.io/gf-guide/metrics.html#7-hhea-and-typo-metrics-should-be-equal
    https://github.com/fonttools/fonttools/issues/3297
    """
    current_os2_vmetrics = [
        getattr(varfont["OS/2"], attr)
        for attr in ("sTypoAscender", "sTypoDescender", "sTypoLineGap")
    ]
    metrics_are_synced = current_os2_vmetrics == [
        getattr(varfont["hhea"], attr)
        for attr in ("ascender", "descender", "lineGap")
    ]

    yield metrics_are_synced

    if metrics_are_synced:
        new_os2_vmetrics = [
            getattr(varfont["OS/2"], attr)
            for attr in ("sTypoAscender", "sTypoDescender", "sTypoLineGap")
        ]
        if current_os2_vmetrics != new_os2_vmetrics:
            for attr, value in zip(
                ("ascender", "descender", "lineGap"), new_os2_vmetrics
            ):
                setattr(varfont["hhea"], attr, value)


def instantiateMVAR(varfont, axisLimits):
    log.info("Instantiating MVAR table")

    mvar = varfont["MVAR"].table
    fvarAxes = varfont["fvar"].axes
    varStore = mvar.VarStore
    defaultDeltas = instantiateItemVariationStore(varStore, fvarAxes, axisLimits)
    with verticalMetricsKeptInSync(varfont):
        setMvarDeltas(varfont, defaultDeltas)

    if varStore.VarRegionList.Region:
        varIndexMapping = varStore.optimize()
        for rec in mvar.ValueRecord:
            rec.VarIdx = varIndexMapping[rec.VarIdx]
    else:
        del varfont["MVAR"]


def _remapVarIdxMap(table, attrName, varIndexMapping, glyphOrder):
    oldMapping = getattr(table, attrName).mapping
    newMapping = [varIndexMapping[oldMapping[glyphName]] for glyphName in glyphOrder]
    setattr(table, attrName, builder.buildVarIdxMap(newMapping, glyphOrder))


# TODO(anthrotype) Add support for HVAR/VVAR in CFF2
def _instantiateVHVAR(varfont, axisLimits, tableFields):
    location = axisLimits.pinnedLocation()
    tableTag = tableFields.tableTag
    fvarAxes = varfont["fvar"].axes
    # Deltas from gvar table have already been applied to the hmtx/vmtx. For full
    # instances (i.e.
all axes pinned), we can simply drop HVAR/VVAR and return if set(location).issuperset(axis.axisTag for axis in fvarAxes): log.info("Dropping %s table", tableTag) del varfont[tableTag] return log.info("Instantiating %s table", tableTag) vhvar = varfont[tableTag].table varStore = vhvar.VarStore # since deltas were already applied, the return value here is ignored instantiateItemVariationStore(varStore, fvarAxes, axisLimits) if varStore.VarRegionList.Region: # Only re-optimize VarStore if the HVAR/VVAR already uses indirect AdvWidthMap # or AdvHeightMap. If a direct, implicit glyphID->VariationIndex mapping is # used for advances, skip re-optimizing and maintain original VariationIndex. if getattr(vhvar, tableFields.advMapping): varIndexMapping = varStore.optimize(use_NO_VARIATION_INDEX=False) glyphOrder = varfont.getGlyphOrder() _remapVarIdxMap(vhvar, tableFields.advMapping, varIndexMapping, glyphOrder) if getattr(vhvar, tableFields.sb1): # left or top sidebearings _remapVarIdxMap(vhvar, tableFields.sb1, varIndexMapping, glyphOrder) if getattr(vhvar, tableFields.sb2): # right or bottom sidebearings _remapVarIdxMap(vhvar, tableFields.sb2, varIndexMapping, glyphOrder) if tableTag == "VVAR" and getattr(vhvar, tableFields.vOrigMapping): _remapVarIdxMap( vhvar, tableFields.vOrigMapping, varIndexMapping, glyphOrder ) def instantiateHVAR(varfont, axisLimits): return _instantiateVHVAR(varfont, axisLimits, varLib.HVAR_FIELDS) def instantiateVVAR(varfont, axisLimits): return _instantiateVHVAR(varfont, axisLimits, varLib.VVAR_FIELDS) class _TupleVarStoreAdapter(object): def __init__(self, regions, axisOrder, tupleVarData, itemCounts): self.regions = regions self.axisOrder = axisOrder self.tupleVarData = tupleVarData self.itemCounts = itemCounts @classmethod def fromItemVarStore(cls, itemVarStore, fvarAxes): axisOrder = [axis.axisTag for axis in fvarAxes] regions = [ region.get_support(fvarAxes) for region in itemVarStore.VarRegionList.Region ] tupleVarData = [] itemCounts = [] for varData in itemVarStore.VarData: variations = [] varDataRegions = (regions[i] for i in varData.VarRegionIndex) for axes, coordinates in zip(varDataRegions, zip(*varData.Item)): variations.append(TupleVariation(axes, list(coordinates))) tupleVarData.append(variations) itemCounts.append(varData.ItemCount) return cls(regions, axisOrder, tupleVarData, itemCounts) def rebuildRegions(self): # Collect the set of all unique region axes from the current TupleVariations. # We use an OrderedDict to de-duplicate regions while keeping the order. uniqueRegions = collections.OrderedDict.fromkeys( ( frozenset(var.axes.items()) for variations in self.tupleVarData for var in variations ) ) # Maintain the original order for the regions that pre-existed, appending # the new regions at the end of the region list. 
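        # (Illustrative aside, not in the original: OrderedDict.fromkeys
        # de-duplicates while preserving first-seen order, e.g.
        #     list(collections.OrderedDict.fromkeys([2, 1, 2, 3])) == [2, 1, 3]
        # here the keys are frozensets of each TupleVariation's
        # (axisTag, tent) items.)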
newRegions = [] for region in self.regions: regionAxes = frozenset(region.items()) if regionAxes in uniqueRegions: newRegions.append(region) del uniqueRegions[regionAxes] if uniqueRegions: newRegions.extend(dict(region) for region in uniqueRegions) self.regions = newRegions def instantiate(self, axisLimits): defaultDeltaArray = [] for variations, itemCount in zip(self.tupleVarData, self.itemCounts): defaultDeltas = instantiateTupleVariationStore(variations, axisLimits) if not defaultDeltas: defaultDeltas = [0] * itemCount defaultDeltaArray.append(defaultDeltas) # rebuild regions whose axes were dropped or limited self.rebuildRegions() pinnedAxes = set(axisLimits.pinnedLocation()) self.axisOrder = [ axisTag for axisTag in self.axisOrder if axisTag not in pinnedAxes ] return defaultDeltaArray def asItemVarStore(self): regionOrder = [frozenset(axes.items()) for axes in self.regions] varDatas = [] for variations, itemCount in zip(self.tupleVarData, self.itemCounts): if variations: assert len(variations[0].coordinates) == itemCount varRegionIndices = [ regionOrder.index(frozenset(var.axes.items())) for var in variations ] varDataItems = list(zip(*(var.coordinates for var in variations))) varDatas.append( builder.buildVarData(varRegionIndices, varDataItems, optimize=False) ) else: varDatas.append( builder.buildVarData([], [[] for _ in range(itemCount)]) ) regionList = builder.buildVarRegionList(self.regions, self.axisOrder) itemVarStore = builder.buildVarStore(regionList, varDatas) # remove unused regions from VarRegionList itemVarStore.prune_regions() return itemVarStore def instantiateItemVariationStore(itemVarStore, fvarAxes, axisLimits): """Compute deltas at partial location, and update varStore in-place. Remove regions in which all axes were instanced, or fall outside the new axis limits. Scale the deltas of the remaining regions where only some of the axes were instanced. The number of VarData subtables, and the number of items within each, are not modified, in order to keep the existing VariationIndex valid. One may call VarStore.optimize() method after this to further optimize those. Args: varStore: An otTables.VarStore object (Item Variation Store) fvarAxes: list of fvar's Axis objects axisLimits: NormalizedAxisLimits: mapping axis tags to normalized min/default/max axis coordinates. May not specify coordinates/ranges for all the fvar axes. Returns: defaultDeltas: to be added to the default instance, of type dict of floats keyed by VariationIndex compound values: i.e. (outer << 16) + inner. 
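    For example (an illustrative aside, not in the original): a delta stored in
    VarData subtable 1 at row 3 is keyed as (1 << 16) + 3 == 0x00010003,
    mirroring how a VariationIndex table packs DeltaSetOuterIndex and
    DeltaSetInnerIndex:

        varIdx = (outer << 16) + inner
        outer, inner = varIdx >> 16, varIdx & 0xFFFF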
""" tupleVarStore = _TupleVarStoreAdapter.fromItemVarStore(itemVarStore, fvarAxes) defaultDeltaArray = tupleVarStore.instantiate(axisLimits) newItemVarStore = tupleVarStore.asItemVarStore() itemVarStore.VarRegionList = newItemVarStore.VarRegionList assert itemVarStore.VarDataCount == newItemVarStore.VarDataCount itemVarStore.VarData = newItemVarStore.VarData defaultDeltas = { ((major << 16) + minor): delta for major, deltas in enumerate(defaultDeltaArray) for minor, delta in enumerate(deltas) } defaultDeltas[itemVarStore.NO_VARIATION_INDEX] = 0 return defaultDeltas def instantiateOTL(varfont, axisLimits): # TODO(anthrotype) Support partial instancing of JSTF and BASE tables if ( "GDEF" not in varfont or varfont["GDEF"].table.Version < 0x00010003 or not varfont["GDEF"].table.VarStore ): return if "GPOS" in varfont: msg = "Instantiating GDEF and GPOS tables" else: msg = "Instantiating GDEF table" log.info(msg) gdef = varfont["GDEF"].table varStore = gdef.VarStore fvarAxes = varfont["fvar"].axes defaultDeltas = instantiateItemVariationStore(varStore, fvarAxes, axisLimits) # When VF are built, big lookups may overflow and be broken into multiple # subtables. MutatorMerger (which inherits from AligningMerger) reattaches # them upon instancing, in case they can now fit a single subtable (if not, # they will be split again upon compilation). # This 'merger' also works as a 'visitor' that traverses the OTL tables and # calls specific methods when instances of a given type are found. # Specifically, it adds default deltas to GPOS Anchors/ValueRecords and GDEF # LigatureCarets, and optionally deletes all VariationIndex tables if the # VarStore is fully instanced. merger = MutatorMerger( varfont, defaultDeltas, deleteVariations=(not varStore.VarRegionList.Region) ) merger.mergeTables(varfont, [varfont], ["GDEF", "GPOS"]) if varStore.VarRegionList.Region: varIndexMapping = varStore.optimize() gdef.remap_device_varidxes(varIndexMapping) if "GPOS" in varfont: varfont["GPOS"].table.remap_device_varidxes(varIndexMapping) else: # Downgrade GDEF. del gdef.VarStore gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef is None: del gdef.MarkGlyphSetsDef gdef.Version = 0x00010000 if not ( gdef.LigCaretList or gdef.MarkAttachClassDef or gdef.GlyphClassDef or gdef.AttachList or (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef) ): del varfont["GDEF"] def _isValidAvarSegmentMap(axisTag, segmentMap): if not segmentMap: return True if not {(-1.0, -1.0), (0, 0), (1.0, 1.0)}.issubset(segmentMap.items()): log.warning( f"Invalid avar SegmentMap record for axis '{axisTag}': does not " "include all required value maps {-1.0: -1.0, 0: 0, 1.0: 1.0}" ) return False previousValue = None for fromCoord, toCoord in sorted(segmentMap.items()): if previousValue is not None and previousValue > toCoord: log.warning( f"Invalid avar AxisValueMap({fromCoord}, {toCoord}) record " f"for axis '{axisTag}': the toCoordinate value must be >= to " f"the toCoordinate value of the preceding record ({previousValue})." ) return False previousValue = toCoord return True def instantiateAvar(varfont, axisLimits): # 'axisLimits' dict must contain user-space (non-normalized) coordinates. 
segments = varfont["avar"].segments # drop table if we instantiate all the axes pinnedAxes = set(axisLimits.pinnedLocation()) if pinnedAxes.issuperset(segments): log.info("Dropping avar table") del varfont["avar"] return log.info("Instantiating avar table") for axis in pinnedAxes: if axis in segments: del segments[axis] # First compute the default normalization for axisLimits coordinates: i.e. # min = -1.0, default = 0, max = +1.0, and in between values interpolated linearly, # without using the avar table's mappings. # Then, for each SegmentMap, if we are restricting its axis, compute the new # mappings by dividing the key/value pairs by the desired new min/max values, # dropping any mappings that fall outside the restricted range. # The keys ('fromCoord') are specified in default normalized coordinate space, # whereas the values ('toCoord') are "mapped forward" using the SegmentMap. normalizedRanges = axisLimits.normalize(varfont, usingAvar=False) newSegments = {} for axisTag, mapping in segments.items(): if not _isValidAvarSegmentMap(axisTag, mapping): continue if mapping and axisTag in normalizedRanges: axisRange = normalizedRanges[axisTag] mappedMin = floatToFixedToFloat( piecewiseLinearMap(axisRange.minimum, mapping), 14 ) mappedDef = floatToFixedToFloat( piecewiseLinearMap(axisRange.default, mapping), 14 ) mappedMax = floatToFixedToFloat( piecewiseLinearMap(axisRange.maximum, mapping), 14 ) mappedAxisLimit = NormalizedAxisTripleAndDistances( mappedMin, mappedDef, mappedMax, axisRange.distanceNegative, axisRange.distancePositive, ) newMapping = {} for fromCoord, toCoord in mapping.items(): if fromCoord < axisRange.minimum or fromCoord > axisRange.maximum: continue fromCoord = axisRange.renormalizeValue(fromCoord) assert mappedMin <= toCoord <= mappedMax toCoord = mappedAxisLimit.renormalizeValue(toCoord) fromCoord = floatToFixedToFloat(fromCoord, 14) toCoord = floatToFixedToFloat(toCoord, 14) newMapping[fromCoord] = toCoord newMapping.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}) newSegments[axisTag] = newMapping else: newSegments[axisTag] = mapping varfont["avar"].segments = newSegments def isInstanceWithinAxisRanges(location, axisRanges): for axisTag, coord in location.items(): if axisTag in axisRanges: axisRange = axisRanges[axisTag] if coord < axisRange.minimum or coord > axisRange.maximum: return False return True def instantiateFvar(varfont, axisLimits): # 'axisLimits' dict must contain user-space (non-normalized) coordinates location = axisLimits.pinnedLocation() fvar = varfont["fvar"] # drop table if we instantiate all the axes if set(location).issuperset(axis.axisTag for axis in fvar.axes): log.info("Dropping fvar table") del varfont["fvar"] return log.info("Instantiating fvar table") axes = [] for axis in fvar.axes: axisTag = axis.axisTag if axisTag in location: continue if axisTag in axisLimits: triple = axisLimits[axisTag] if triple.default is None: triple = (triple.minimum, axis.defaultValue, triple.maximum) axis.minValue, axis.defaultValue, axis.maxValue = triple axes.append(axis) fvar.axes = axes # only keep NamedInstances whose coordinates == pinned axis location instances = [] for instance in fvar.instances: if any(instance.coordinates[axis] != value for axis, value in location.items()): continue for axisTag in location: del instance.coordinates[axisTag] if not isInstanceWithinAxisRanges(instance.coordinates, axisLimits): continue instances.append(instance) fvar.instances = instances def instantiateSTAT(varfont, axisLimits): # 'axisLimits' dict must contain user-space 
(non-normalized) coordinates stat = varfont["STAT"].table if not stat.DesignAxisRecord or not ( stat.AxisValueArray and stat.AxisValueArray.AxisValue ): return # STAT table empty, nothing to do log.info("Instantiating STAT table") newAxisValueTables = axisValuesFromAxisLimits(stat, axisLimits) stat.AxisValueCount = len(newAxisValueTables) if stat.AxisValueCount: stat.AxisValueArray.AxisValue = newAxisValueTables else: stat.AxisValueArray = None def axisValuesFromAxisLimits(stat, axisLimits): def isAxisValueOutsideLimits(axisTag, axisValue): if axisTag in axisLimits: triple = axisLimits[axisTag] if axisValue < triple.minimum or axisValue > triple.maximum: return True return False # only keep AxisValues whose axis is not pinned nor restricted, or is pinned at the # exact (nominal) value, or is restricted but the value is within the new range designAxes = stat.DesignAxisRecord.Axis newAxisValueTables = [] for axisValueTable in stat.AxisValueArray.AxisValue: axisValueFormat = axisValueTable.Format if axisValueFormat in (1, 2, 3): axisTag = designAxes[axisValueTable.AxisIndex].AxisTag if axisValueFormat == 2: axisValue = axisValueTable.NominalValue else: axisValue = axisValueTable.Value if isAxisValueOutsideLimits(axisTag, axisValue): continue elif axisValueFormat == 4: # drop 'non-analytic' AxisValue if _any_ AxisValueRecord doesn't match # the pinned location or is outside range dropAxisValueTable = False for rec in axisValueTable.AxisValueRecord: axisTag = designAxes[rec.AxisIndex].AxisTag axisValue = rec.Value if isAxisValueOutsideLimits(axisTag, axisValue): dropAxisValueTable = True break if dropAxisValueTable: continue else: log.warning("Unknown AxisValue table format (%s); ignored", axisValueFormat) newAxisValueTables.append(axisValueTable) return newAxisValueTables def setMacOverlapFlags(glyfTable): flagOverlapCompound = _g_l_y_f.OVERLAP_COMPOUND flagOverlapSimple = _g_l_y_f.flagOverlapSimple for glyphName in glyfTable.keys(): glyph = glyfTable[glyphName] # Set OVERLAP_COMPOUND bit for compound glyphs if glyph.isComposite(): glyph.components[0].flags |= flagOverlapCompound # Set OVERLAP_SIMPLE bit for simple glyphs elif glyph.numberOfContours > 0: glyph.flags[0] |= flagOverlapSimple def normalize(value, triple, avarMapping): value = normalizeValue(value, triple) if avarMapping: value = piecewiseLinearMap(value, avarMapping) # Quantize to F2Dot14, to avoid surprise interpolations. return floatToFixedToFloat(value, 14) def sanityCheckVariableTables(varfont): if "fvar" not in varfont: raise ValueError("Missing required table fvar") if "gvar" in varfont: if "glyf" not in varfont: raise ValueError("Can't have gvar without glyf") # TODO(anthrotype) Remove once we do support partial instancing CFF2 if "CFF2" in varfont: raise NotImplementedError("Instancing CFF2 variable fonts is not supported yet") def instantiateVariableFont( varfont, axisLimits, inplace=False, optimize=True, overlap=OverlapMode.KEEP_AND_SET_FLAGS, updateFontNames=False, ): """Instantiate variable font, either fully or partially. Depending on whether the `axisLimits` dictionary references all or some of the input varfont's axes, the output font will either be a full instance (static font) or a variable font with possibly less variation data. Args: varfont: a TTFont instance, which must contain at least an 'fvar' table. Note that variable fonts with 'CFF2' table are not supported yet. 
axisLimits: a dict keyed by axis tags (str) containing the coordinates (float) along one or more axes where the desired instance will be located. If the value is `None`, the default coordinate as per 'fvar' table for that axis is used. The limit values can also be (min, max) tuples for restricting an axis's variation range. The default axis value must be included in the new range. inplace (bool): whether to modify input TTFont object in-place instead of returning a distinct object. optimize (bool): if False, do not perform IUP-delta optimization on the remaining 'gvar' table's deltas. Possibly faster, and might work around rendering issues in some buggy environments, at the cost of a slightly larger file size. overlap (OverlapMode): variable fonts usually contain overlapping contours, and some font rendering engines on Apple platforms require that the `OVERLAP_SIMPLE` and `OVERLAP_COMPOUND` flags in the 'glyf' table be set to force rendering using a non-zero fill rule. Thus we always set these flags on all glyphs to maximise cross-compatibility of the generated instance. You can disable this by passing OverlapMode.KEEP_AND_DONT_SET_FLAGS. If you want to remove the overlaps altogether and merge overlapping contours and components, you can pass OverlapMode.REMOVE (or REMOVE_AND_IGNORE_ERRORS to not hard-fail on tricky glyphs). Note that this requires the skia-pathops package (available to pip install). The overlap parameter only has effect when generating full static instances. updateFontNames (bool): if True, update the instantiated font's name table using the Axis Value Tables from the STAT table. The name table and the style bits in the head and OS/2 table will be updated so they conform to the R/I/B/BI model. If the STAT table is missing or an Axis Value table is missing for a given axis coordinate, a ValueError will be raised. 
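    Example (a minimal sketch, not in the original; the file names are
    hypothetical):

        from fontTools.ttLib import TTFont
        from fontTools.varLib.instancer import instantiateVariableFont

        varfont = TTFont("MyFont-VF.ttf")
        # pin 'wght' at 400 and restrict 'wdth' to 75..100; axes that are not
        # mentioned keep their full 'fvar' range
        partial = instantiateVariableFont(varfont, {"wght": 400, "wdth": (75, 100)})
        partial.save("MyFont-VF-partial.ttf")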
""" # 'overlap' used to be bool and is now enum; for backward compat keep accepting bool overlap = OverlapMode(int(overlap)) sanityCheckVariableTables(varfont) axisLimits = AxisLimits(axisLimits).limitAxesAndPopulateDefaults(varfont) log.info("Restricted limits: %s", axisLimits) normalizedLimits = axisLimits.normalize(varfont) log.info("Normalized limits: %s", normalizedLimits) if not inplace: varfont = deepcopy(varfont) if "DSIG" in varfont: del varfont["DSIG"] if updateFontNames: log.info("Updating name table") names.updateNameTable(varfont, axisLimits) if "gvar" in varfont: instantiateGvar(varfont, normalizedLimits, optimize=optimize) if "cvar" in varfont: instantiateCvar(varfont, normalizedLimits) if "MVAR" in varfont: instantiateMVAR(varfont, normalizedLimits) if "HVAR" in varfont: instantiateHVAR(varfont, normalizedLimits) if "VVAR" in varfont: instantiateVVAR(varfont, normalizedLimits) instantiateOTL(varfont, normalizedLimits) instantiateFeatureVariations(varfont, normalizedLimits) if "avar" in varfont: instantiateAvar(varfont, axisLimits) with names.pruningUnusedNames(varfont): if "STAT" in varfont: instantiateSTAT(varfont, axisLimits) instantiateFvar(varfont, axisLimits) if "fvar" not in varfont: if "glyf" in varfont: if overlap == OverlapMode.KEEP_AND_SET_FLAGS: setMacOverlapFlags(varfont["glyf"]) elif overlap in (OverlapMode.REMOVE, OverlapMode.REMOVE_AND_IGNORE_ERRORS): from fontTools.ttLib.removeOverlaps import removeOverlaps log.info("Removing overlaps from glyf table") removeOverlaps( varfont, ignoreErrors=(overlap == OverlapMode.REMOVE_AND_IGNORE_ERRORS), ) if "OS/2" in varfont: varfont["OS/2"].recalcAvgCharWidth(varfont) varLib.set_default_weight_width_slant( varfont, location=axisLimits.defaultLocation() ) if updateFontNames: # Set Regular/Italic/Bold/Bold Italic bits as appropriate, after the # name table has been updated. setRibbiBits(varfont) return varfont def setRibbiBits(font): """Set the `head.macStyle` and `OS/2.fsSelection` style bits appropriately.""" english_ribbi_style = font["name"].getName(names.NameID.SUBFAMILY_NAME, 3, 1, 0x409) if english_ribbi_style is None: return styleMapStyleName = english_ribbi_style.toStr().lower() if styleMapStyleName not in {"regular", "bold", "italic", "bold italic"}: return if styleMapStyleName == "bold": font["head"].macStyle = 0b01 elif styleMapStyleName == "bold italic": font["head"].macStyle = 0b11 elif styleMapStyleName == "italic": font["head"].macStyle = 0b10 selection = font["OS/2"].fsSelection # First clear... selection &= ~(1 << 0) selection &= ~(1 << 5) selection &= ~(1 << 6) # ...then re-set the bits. 
    if styleMapStyleName == "regular":
        selection |= 1 << 6
    elif styleMapStyleName == "bold":
        selection |= 1 << 5
    elif styleMapStyleName == "italic":
        selection |= 1 << 0
    elif styleMapStyleName == "bold italic":
        selection |= 1 << 0
        selection |= 1 << 5
    font["OS/2"].fsSelection = selection


def parseLimits(limits: Iterable[str]) -> Dict[str, Optional[AxisTriple]]:
    result = {}
    for limitString in limits:
        match = re.match(
            r"^(\w{1,4})=(?:(drop)|(?:([^:]*)(?:[:]([^:]*))?(?:[:]([^:]*))?))$",
            limitString,
        )
        if not match:
            raise ValueError("invalid location format: %r" % limitString)
        tag = match.group(1).ljust(4)
        if match.group(2):  # 'drop'
            result[tag] = None
            continue
        triple = match.group(3, 4, 5)
        if triple[1] is None:  # "value" syntax
            triple = (triple[0], triple[0], triple[0])
        elif triple[2] is None:  # "min:max" syntax
            triple = (triple[0], None, triple[1])
        triple = tuple(float(v) if v else None for v in triple)
        result[tag] = AxisTriple(*triple)
    return result


def parseArgs(args):
    """Parse argv.

    Returns:
        3-tuple (infile, axisLimits, options)
        axisLimits is either a Dict[str, Optional[float]], for pinning variation axes
        to specific coordinates along those axes (with `None` as a placeholder for an
        axis' default value); or a Dict[str, Tuple(float, float)], meaning limit this
        axis to min/max range.
        Axes locations are in user-space coordinates, as defined in the "fvar" table.
    """
    from fontTools import configLogger
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.instancer",
        description="Partially instantiate a variable font",
    )
    parser.add_argument("input", metavar="INPUT.ttf", help="Input variable TTF file.")
    parser.add_argument(
        "locargs",
        metavar="AXIS=LOC",
        nargs="*",
        help="List of space separated locations. A location consists of "
        "the tag of a variation axis, followed by '=' and the literal "
        "string 'drop', or colon-separated list of one to three values, "
        "each of which is the empty string, or a number. "
        "E.g.: wdth=100 or wght=75.0:125.0 or wght=100:400:700 or wght=:500: "
        "or wght=drop",
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT.ttf",
        default=None,
        help="Output instance TTF file (default: INPUT-instance.ttf).",
    )
    parser.add_argument(
        "--no-optimize",
        dest="optimize",
        action="store_false",
        help="Don't perform IUP optimization on the remaining gvar TupleVariations",
    )
    parser.add_argument(
        "--no-overlap-flag",
        dest="overlap",
        action="store_false",
        help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags (only applicable "
        "when generating a full instance)",
    )
    parser.add_argument(
        "--remove-overlaps",
        dest="remove_overlaps",
        action="store_true",
        help="Merge overlapping contours and components (only applicable "
        "when generating a full instance). Requires skia-pathops",
    )
    parser.add_argument(
        "--ignore-overlap-errors",
        dest="ignore_overlap_errors",
        action="store_true",
        help="Don't crash if the remove-overlaps operation fails for some glyphs.",
    )
    parser.add_argument(
        "--update-name-table",
        action="store_true",
        help="Update the instantiated font's `name` table.
Input font must have " "a STAT table with Axis Value Tables", ) parser.add_argument( "--no-recalc-timestamp", dest="recalc_timestamp", action="store_false", help="Don't set the output font's timestamp to the current time.", ) parser.add_argument( "--no-recalc-bounds", dest="recalc_bounds", action="store_false", help="Don't recalculate font bounding boxes", ) loggingGroup = parser.add_mutually_exclusive_group(required=False) loggingGroup.add_argument( "-v", "--verbose", action="store_true", help="Run more verbosely." ) loggingGroup.add_argument( "-q", "--quiet", action="store_true", help="Turn verbosity off." ) options = parser.parse_args(args) if options.remove_overlaps: if options.ignore_overlap_errors: options.overlap = OverlapMode.REMOVE_AND_IGNORE_ERRORS else: options.overlap = OverlapMode.REMOVE else: options.overlap = OverlapMode(int(options.overlap)) infile = options.input if not os.path.isfile(infile): parser.error("No such file '{}'".format(infile)) configLogger( level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") ) try: axisLimits = parseLimits(options.locargs) except ValueError as e: parser.error(str(e)) if len(axisLimits) != len(options.locargs): parser.error("Specified multiple limits for the same axis") return (infile, axisLimits, options) def main(args=None): """Partially instantiate a variable font""" infile, axisLimits, options = parseArgs(args) log.info("Restricting axes: %s", axisLimits) log.info("Loading variable font") varfont = TTFont( infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=options.recalc_bounds, ) isFullInstance = { axisTag for axisTag, limit in axisLimits.items() if not isinstance(limit, tuple) }.issuperset(axis.axisTag for axis in varfont["fvar"].axes) instantiateVariableFont( varfont, axisLimits, inplace=True, optimize=options.optimize, overlap=options.overlap, updateFontNames=options.update_name_table, ) suffix = "-instance" if isFullInstance else "-partial" outfile = ( makeOutputFileName(infile, overWrite=True, suffix=suffix) if not options.output else options.output ) log.info( "Saving %s font %s", "instance" if isFullInstance else "partial variable", outfile, ) varfont.save(outfile) PKaZZZv؁�hh&fontTools/varLib/instancer/__main__.pyimport sys from fontTools.varLib.instancer import main if __name__ == "__main__": sys.exit(main()) PKaZZZ��k��)fontTools/varLib/instancer/featureVars.pyfrom fontTools.ttLib.tables import otTables as ot from copy import deepcopy import logging log = logging.getLogger("fontTools.varLib.instancer") def _featureVariationRecordIsUnique(rec, seen): conditionSet = [] conditionSets = ( rec.ConditionSet.ConditionTable if rec.ConditionSet is not None else [] ) for cond in conditionSets: if cond.Format != 1: # can't tell whether this is duplicate, assume is unique return True conditionSet.append( (cond.AxisIndex, cond.FilterRangeMinValue, cond.FilterRangeMaxValue) ) # besides the set of conditions, we also include the FeatureTableSubstitution # version to identify unique FeatureVariationRecords, even though only one # version is currently defined. It's theoretically possible that multiple # records with same conditions but different substitution table version be # present in the same font for backward compatibility. 
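    # (Illustrative aside, not in the original: the key built below is e.g.
    # frozenset({0x00010000, (0, 0.5, 1.0)}) for a record with one Format 1
    # condition on axis index 0 filtering the 0.5..1.0 range; two records
    # producing the same key are treated as duplicates.)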
recordKey = frozenset([rec.FeatureTableSubstitution.Version] + conditionSet) if recordKey in seen: return False else: seen.add(recordKey) # side effect return True def _limitFeatureVariationConditionRange(condition, axisLimit): minValue = condition.FilterRangeMinValue maxValue = condition.FilterRangeMaxValue if ( minValue > maxValue or minValue > axisLimit.maximum or maxValue < axisLimit.minimum ): # condition invalid or out of range return return tuple( axisLimit.renormalizeValue(v, extrapolate=False) for v in (minValue, maxValue) ) def _instantiateFeatureVariationRecord( record, recIdx, axisLimits, fvarAxes, axisIndexMap ): applies = True shouldKeep = False newConditions = [] from fontTools.varLib.instancer import NormalizedAxisTripleAndDistances default_triple = NormalizedAxisTripleAndDistances(-1, 0, +1) if record.ConditionSet is None: record.ConditionSet = ot.ConditionSet() record.ConditionSet.ConditionTable = [] record.ConditionSet.ConditionCount = 0 for i, condition in enumerate(record.ConditionSet.ConditionTable): if condition.Format == 1: axisIdx = condition.AxisIndex axisTag = fvarAxes[axisIdx].axisTag minValue = condition.FilterRangeMinValue maxValue = condition.FilterRangeMaxValue triple = axisLimits.get(axisTag, default_triple) if not (minValue <= triple.default <= maxValue): applies = False # if condition not met, remove entire record if triple.minimum > maxValue or triple.maximum < minValue: newConditions = None break if axisTag in axisIndexMap: # remap axis index condition.AxisIndex = axisIndexMap[axisTag] # remap condition limits newRange = _limitFeatureVariationConditionRange(condition, triple) if newRange: # keep condition with updated limits minimum, maximum = newRange condition.FilterRangeMinValue = minimum condition.FilterRangeMaxValue = maximum shouldKeep = True if minimum != -1 or maximum != +1: newConditions.append(condition) else: # condition out of range, remove entire record newConditions = None break else: log.warning( "Condition table {0} of FeatureVariationRecord {1} has " "unsupported format ({2}); ignored".format(i, recIdx, condition.Format) ) applies = False newConditions.append(condition) if newConditions is not None and shouldKeep: record.ConditionSet.ConditionTable = newConditions if not newConditions: record.ConditionSet = None shouldKeep = True else: shouldKeep = False # Does this *always* apply? 
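    # (Illustrative aside, not in the original: 'universal' is True when the
    # record is kept yet all of its conditions were dropped because each one,
    # after renormalization, spans the full -1..+1 range of the limited axis;
    # such a record always matches, so any later records can never apply.)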
universal = shouldKeep and not newConditions return applies, shouldKeep, universal def _instantiateFeatureVariations(table, fvarAxes, axisLimits): pinnedAxes = set(axisLimits.pinnedLocation()) axisOrder = [axis.axisTag for axis in fvarAxes if axis.axisTag not in pinnedAxes] axisIndexMap = {axisTag: axisOrder.index(axisTag) for axisTag in axisOrder} featureVariationApplied = False uniqueRecords = set() newRecords = [] defaultsSubsts = None for i, record in enumerate(table.FeatureVariations.FeatureVariationRecord): applies, shouldKeep, universal = _instantiateFeatureVariationRecord( record, i, axisLimits, fvarAxes, axisIndexMap ) if shouldKeep and _featureVariationRecordIsUnique(record, uniqueRecords): newRecords.append(record) if applies and not featureVariationApplied: assert record.FeatureTableSubstitution.Version == 0x00010000 defaultsSubsts = deepcopy(record.FeatureTableSubstitution) for default, rec in zip( defaultsSubsts.SubstitutionRecord, record.FeatureTableSubstitution.SubstitutionRecord, ): default.Feature = deepcopy( table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature ) table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = deepcopy( rec.Feature ) # Set variations only once featureVariationApplied = True # Further records don't have a chance to apply after a universal record if universal: break # Insert a catch-all record to reinstate the old features if necessary if featureVariationApplied and newRecords and not universal: defaultRecord = ot.FeatureVariationRecord() defaultRecord.ConditionSet = ot.ConditionSet() defaultRecord.ConditionSet.ConditionTable = [] defaultRecord.ConditionSet.ConditionCount = 0 defaultRecord.FeatureTableSubstitution = defaultsSubsts newRecords.append(defaultRecord) if newRecords: table.FeatureVariations.FeatureVariationRecord = newRecords table.FeatureVariations.FeatureVariationCount = len(newRecords) else: del table.FeatureVariations # downgrade table version if there are no FeatureVariations left table.Version = 0x00010000 def instantiateFeatureVariations(varfont, axisLimits): for tableTag in ("GPOS", "GSUB"): if tableTag not in varfont or not getattr( varfont[tableTag].table, "FeatureVariations", None ): continue log.info("Instantiating FeatureVariations of %s table", tableTag) _instantiateFeatureVariations( varfont[tableTag].table, varfont["fvar"].axes, axisLimits ) # remove unreferenced lookups varfont[tableTag].prune_lookups() PKaZZZ�‹�f:f:#fontTools/varLib/instancer/names.py"""Helpers for instantiating name table records.""" from contextlib import contextmanager from copy import deepcopy from enum import IntEnum import re class NameID(IntEnum): FAMILY_NAME = 1 SUBFAMILY_NAME = 2 UNIQUE_FONT_IDENTIFIER = 3 FULL_FONT_NAME = 4 VERSION_STRING = 5 POSTSCRIPT_NAME = 6 TYPOGRAPHIC_FAMILY_NAME = 16 TYPOGRAPHIC_SUBFAMILY_NAME = 17 VARIATIONS_POSTSCRIPT_NAME_PREFIX = 25 ELIDABLE_AXIS_VALUE_NAME = 2 def getVariationNameIDs(varfont): used = [] if "fvar" in varfont: fvar = varfont["fvar"] for axis in fvar.axes: used.append(axis.axisNameID) for instance in fvar.instances: used.append(instance.subfamilyNameID) if instance.postscriptNameID != 0xFFFF: used.append(instance.postscriptNameID) if "STAT" in varfont: stat = varfont["STAT"].table for axis in stat.DesignAxisRecord.Axis if stat.DesignAxisRecord else (): used.append(axis.AxisNameID) for value in stat.AxisValueArray.AxisValue if stat.AxisValueArray else (): used.append(value.ValueNameID) elidedFallbackNameID = getattr(stat, "ElidedFallbackNameID", None) if elidedFallbackNameID is not None: 
            used.append(elidedFallbackNameID)
    # nameIDs <= 255 are reserved by OT spec so we don't touch them
    return {nameID for nameID in used if nameID > 255}


@contextmanager
def pruningUnusedNames(varfont):
    from . import log

    origNameIDs = getVariationNameIDs(varfont)

    yield

    log.info("Pruning name table")
    exclude = origNameIDs - getVariationNameIDs(varfont)
    varfont["name"].names[:] = [
        record for record in varfont["name"].names if record.nameID not in exclude
    ]
    if "ltag" in varfont:
        # Drop the whole 'ltag' table if all the language-dependent Unicode name
        # records that reference it have been dropped.
        # TODO: Only prune unused ltag tags, renumbering langIDs accordingly.
        # Note ltag can also be used by feat or morx tables, so check those too.
        if not any(
            record
            for record in varfont["name"].names
            if record.platformID == 0 and record.langID != 0xFFFF
        ):
            del varfont["ltag"]


def updateNameTable(varfont, axisLimits):
    """Update instantiated variable font's name table using STAT AxisValues.

    Raises ValueError if the STAT table is missing or an Axis Value table is
    missing for requested axis locations.

    First, collect all STAT AxisValues that match the new default axis locations
    (excluding "elided" ones); concatenate the strings in design axis order, while
    giving priority to "synthetic" values (Format 4), to form the typographic
    subfamily name associated with the new default instance. Finally, update all
    related records in the name table, making sure that legacy family/sub-family
    names conform to the R/I/B/BI (Regular, Italic, Bold, Bold Italic) naming model.

    Example: Updating a partial variable font:
    | >>> ttFont = TTFont("OpenSans[wdth,wght].ttf")
    | >>> updateNameTable(ttFont, {"wght": (400, 900), "wdth": 75})

    The name table records will be updated in the following manner:
    NameID 1 familyName: "Open Sans" --> "Open Sans Condensed"
    NameID 2 subFamilyName: "Regular" --> "Regular"
    NameID 3 Unique font identifier: "3.000;GOOG;OpenSans-Regular" --> \
        "3.000;GOOG;OpenSans-Condensed"
    NameID 4 Full font name: "Open Sans Regular" --> "Open Sans Condensed"
    NameID 6 PostScript name: "OpenSans-Regular" --> "OpenSans-Condensed"
    NameID 16 Typographic Family name: None --> "Open Sans"
    NameID 17 Typographic Subfamily name: None --> "Condensed"

    References:
    https://docs.microsoft.com/en-us/typography/opentype/spec/stat
    https://docs.microsoft.com/en-us/typography/opentype/spec/name#name-ids
    """
    from . import AxisLimits, axisValuesFromAxisLimits

    if "STAT" not in varfont:
        raise ValueError("Cannot update name table since there is no STAT table.")
    stat = varfont["STAT"].table
    if not stat.AxisValueArray:
        raise ValueError("Cannot update name table since there are no STAT Axis Values")
    fvar = varfont["fvar"]

    # The updated name table will reflect the new 'zero origin' of the font.
    # If we're instantiating a partial font, we will populate the unpinned
    # axes with their default axis values from fvar.
    axisLimits = AxisLimits(axisLimits).limitAxesAndPopulateDefaults(varfont)
    partialDefaults = axisLimits.defaultLocation()
    fvarDefaults = {a.axisTag: a.defaultValue for a in fvar.axes}
    defaultAxisCoords = AxisLimits({**fvarDefaults, **partialDefaults})
    assert all(v.minimum == v.maximum for v in defaultAxisCoords.values())

    axisValueTables = axisValuesFromAxisLimits(stat, defaultAxisCoords)
    checkAxisValuesExist(stat, axisValueTables, defaultAxisCoords.pinnedLocation())

    # ignore "elidable" axis values, should be omitted in application font menus.
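    # (Illustrative aside, not in the original: ELIDABLE_AXIS_VALUE_NAME is
    # bit 1, i.e. 0x0002, of the AxisValue Flags field, so e.g. a value whose
    # Flags is 0x0003 also carries OLDER_SIBLING_FONT_ATTRIBUTE and is
    # filtered out below.)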
axisValueTables = [ v for v in axisValueTables if not v.Flags & ELIDABLE_AXIS_VALUE_NAME ] axisValueTables = _sortAxisValues(axisValueTables) _updateNameRecords(varfont, axisValueTables) def checkAxisValuesExist(stat, axisValues, axisCoords): seen = set() designAxes = stat.DesignAxisRecord.Axis hasValues = set() for value in stat.AxisValueArray.AxisValue: if value.Format in (1, 2, 3): hasValues.add(designAxes[value.AxisIndex].AxisTag) elif value.Format == 4: for rec in value.AxisValueRecord: hasValues.add(designAxes[rec.AxisIndex].AxisTag) for axisValueTable in axisValues: axisValueFormat = axisValueTable.Format if axisValueTable.Format in (1, 2, 3): axisTag = designAxes[axisValueTable.AxisIndex].AxisTag if axisValueFormat == 2: axisValue = axisValueTable.NominalValue else: axisValue = axisValueTable.Value if axisTag in axisCoords and axisValue == axisCoords[axisTag]: seen.add(axisTag) elif axisValueTable.Format == 4: for rec in axisValueTable.AxisValueRecord: axisTag = designAxes[rec.AxisIndex].AxisTag if axisTag in axisCoords and rec.Value == axisCoords[axisTag]: seen.add(axisTag) missingAxes = (set(axisCoords) - seen) & hasValues if missingAxes: missing = ", ".join(f"'{i}': {axisCoords[i]}" for i in missingAxes) raise ValueError(f"Cannot find Axis Values {{{missing}}}") def _sortAxisValues(axisValues): # Sort by axis index, remove duplicates and ensure that format 4 AxisValues # are dominant. # The MS Spec states: "if a format 1, format 2 or format 3 table has a # (nominal) value used in a format 4 table that also has values for # other axes, the format 4 table, being the more specific match, is used", # https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4 results = [] seenAxes = set() # Sort format 4 axes so the tables with the most AxisValueRecords are first format4 = sorted( [v for v in axisValues if v.Format == 4], key=lambda v: len(v.AxisValueRecord), reverse=True, ) for val in format4: axisIndexes = set(r.AxisIndex for r in val.AxisValueRecord) minIndex = min(axisIndexes) if not seenAxes & axisIndexes: seenAxes |= axisIndexes results.append((minIndex, val)) for val in axisValues: if val in format4: continue axisIndex = val.AxisIndex if axisIndex not in seenAxes: seenAxes.add(axisIndex) results.append((axisIndex, val)) return [axisValue for _, axisValue in sorted(results)] def _updateNameRecords(varfont, axisValues): # Update nametable based on the axisValues using the R/I/B/BI model. nametable = varfont["name"] stat = varfont["STAT"].table axisValueNameIDs = [a.ValueNameID for a in axisValues] ribbiNameIDs = [n for n in axisValueNameIDs if _isRibbi(nametable, n)] nonRibbiNameIDs = [n for n in axisValueNameIDs if n not in ribbiNameIDs] elidedNameID = stat.ElidedFallbackNameID elidedNameIsRibbi = _isRibbi(nametable, elidedNameID) getName = nametable.getName platforms = set((r.platformID, r.platEncID, r.langID) for r in nametable.names) for platform in platforms: if not all(getName(i, *platform) for i in (1, 2, elidedNameID)): # Since no family name and subfamily name records were found, # we cannot update this set of name Records. 
            continue

        subFamilyName = " ".join(
            getName(n, *platform).toUnicode() for n in ribbiNameIDs
        )
        if nonRibbiNameIDs:
            typoSubFamilyName = " ".join(
                getName(n, *platform).toUnicode() for n in axisValueNameIDs
            )
        else:
            typoSubFamilyName = None

        # If neither subFamilyName nor typographic SubFamilyName exists,
        # we will use the STAT's elidedFallbackName
        if not typoSubFamilyName and not subFamilyName:
            if elidedNameIsRibbi:
                subFamilyName = getName(elidedNameID, *platform).toUnicode()
            else:
                typoSubFamilyName = getName(elidedNameID, *platform).toUnicode()

        familyNameSuffix = " ".join(
            getName(n, *platform).toUnicode() for n in nonRibbiNameIDs
        )

        _updateNameTableStyleRecords(
            varfont,
            familyNameSuffix,
            subFamilyName,
            typoSubFamilyName,
            *platform,
        )


def _isRibbi(nametable, nameID):
    englishRecord = nametable.getName(nameID, 3, 1, 0x409)
    return (
        True
        if englishRecord is not None
        and englishRecord.toUnicode() in ("Regular", "Italic", "Bold", "Bold Italic")
        else False
    )


def _updateNameTableStyleRecords(
    varfont,
    familyNameSuffix,
    subFamilyName,
    typoSubFamilyName,
    platformID=3,
    platEncID=1,
    langID=0x409,
):
    # TODO (Marc F) It may be nice to make this part a standalone
    # font renamer in the future.
    nametable = varfont["name"]
    platform = (platformID, platEncID, langID)

    currentFamilyName = nametable.getName(
        NameID.TYPOGRAPHIC_FAMILY_NAME, *platform
    ) or nametable.getName(NameID.FAMILY_NAME, *platform)

    currentStyleName = nametable.getName(
        NameID.TYPOGRAPHIC_SUBFAMILY_NAME, *platform
    ) or nametable.getName(NameID.SUBFAMILY_NAME, *platform)

    if not all([currentFamilyName, currentStyleName]):
        raise ValueError(f"Missing required NameIDs 1 and 2 for platform {platform}")

    currentFamilyName = currentFamilyName.toUnicode()
    currentStyleName = currentStyleName.toUnicode()

    nameIDs = {
        NameID.FAMILY_NAME: currentFamilyName,
        NameID.SUBFAMILY_NAME: subFamilyName or "Regular",
    }
    if typoSubFamilyName:
        nameIDs[NameID.FAMILY_NAME] = f"{currentFamilyName} {familyNameSuffix}".strip()
        nameIDs[NameID.TYPOGRAPHIC_FAMILY_NAME] = currentFamilyName
        nameIDs[NameID.TYPOGRAPHIC_SUBFAMILY_NAME] = typoSubFamilyName
    else:
        # Remove previous Typographic Family and SubFamily names since they're
        # no longer required
        for nameID in (
            NameID.TYPOGRAPHIC_FAMILY_NAME,
            NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
        ):
            nametable.removeNames(nameID=nameID)

    newFamilyName = (
        nameIDs.get(NameID.TYPOGRAPHIC_FAMILY_NAME) or nameIDs[NameID.FAMILY_NAME]
    )
    newStyleName = (
        nameIDs.get(NameID.TYPOGRAPHIC_SUBFAMILY_NAME) or nameIDs[NameID.SUBFAMILY_NAME]
    )

    nameIDs[NameID.FULL_FONT_NAME] = f"{newFamilyName} {newStyleName}"
    nameIDs[NameID.POSTSCRIPT_NAME] = _updatePSNameRecord(
        varfont, newFamilyName, newStyleName, platform
    )

    uniqueID = _updateUniqueIdNameRecord(varfont, nameIDs, platform)
    if uniqueID:
        nameIDs[NameID.UNIQUE_FONT_IDENTIFIER] = uniqueID

    for nameID, string in nameIDs.items():
        assert string, nameID
        nametable.setName(string, nameID, *platform)

    if "fvar" not in varfont:
        nametable.removeNames(NameID.VARIATIONS_POSTSCRIPT_NAME_PREFIX)


def _updatePSNameRecord(varfont, familyName, styleName, platform):
    # Implementation based on Adobe Technical Note #5902 :
    # https://wwwimages2.adobe.com/content/dam/acom/en/devnet/font/pdfs/5902.AdobePSNameGeneration.pdf
    nametable = varfont["name"]

    family_prefix = nametable.getName(
        NameID.VARIATIONS_POSTSCRIPT_NAME_PREFIX, *platform
    )
    if family_prefix:
        family_prefix = family_prefix.toUnicode()
    else:
        family_prefix = familyName

    psName = f"{family_prefix}-{styleName}"
    # Remove any characters other than uppercase Latin letters, lowercase
    # Latin
    # letters, digits and hyphens.
    psName = re.sub(r"[^A-Za-z0-9-]", r"", psName)

    if len(psName) > 127:
        # Abbreviating the stylename so it fits within 127 characters whilst
        # conforming to every vendor's specification is too complex. Instead
        # we simply truncate the psname and add the required "..."
        return f"{psName[:124]}..."
    return psName


def _updateUniqueIdNameRecord(varfont, nameIDs, platform):
    nametable = varfont["name"]
    currentRecord = nametable.getName(NameID.UNIQUE_FONT_IDENTIFIER, *platform)
    if not currentRecord:
        return None

    # Check if full name and postscript name are a substring of currentRecord
    for nameID in (NameID.FULL_FONT_NAME, NameID.POSTSCRIPT_NAME):
        nameRecord = nametable.getName(nameID, *platform)
        if not nameRecord:
            continue
        if nameRecord.toUnicode() in currentRecord.toUnicode():
            return currentRecord.toUnicode().replace(
                nameRecord.toUnicode(), nameIDs[nameRecord.nameID]
            )

    # Create a new string since we couldn't find any substrings.
    fontVersion = _fontVersion(varfont, platform)
    achVendID = varfont["OS/2"].achVendID
    # Remove non-ASCII characters and trailing spaces
    vendor = re.sub(r"[^\x00-\x7F]", "", achVendID).strip()
    psName = nameIDs[NameID.POSTSCRIPT_NAME]
    return f"{fontVersion};{vendor};{psName}"


def _fontVersion(font, platform=(3, 1, 0x409)):
    nameRecord = font["name"].getName(NameID.VERSION_STRING, *platform)
    if nameRecord is None:
        return f'{font["head"].fontRevision:.3f}'
    # "Version 1.101; ttfautohint (v1.8.1.43-b0c9)" --> "1.101"
    # Also works fine with inputs "Version 1.101" or "1.101" etc
    versionNumber = nameRecord.toUnicode().split(";")[0]
    return versionNumber.lstrip("Version ").strip()
PKaZZZŁ=��*�*$fontTools/varLib/instancer/solver.py
from fontTools.varLib.models import supportScalar
from fontTools.misc.fixedTools import MAX_F2DOT14
from functools import lru_cache

__all__ = ["rebaseTent"]

EPSILON = 1 / (1 << 14)


def _reverse_negate(v):
    return (-v[2], -v[1], -v[0])


def _solve(tent, axisLimit, negative=False):
    axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit
    lower, peak, upper = tent

    # Mirror the problem such that axisDef <= peak
    if axisDef > peak:
        return [
            (scalar, _reverse_negate(t) if t is not None else None)
            for scalar, t in _solve(
                _reverse_negate(tent),
                axisLimit.reverse_negate(),
                not negative,
            )
        ]
    # axisDef <= peak

    # case 1: The whole deltaset falls outside the new limit; we can drop it
    #
    #                                          peak
    #  1.........................................o..........
    #                                           / \
    #                                          /   \
    #                                         /     \
    #                                        /       \
    #  0---|-----------|----------|-------- o         o----1
    #    axisMin     axisDef    axisMax   lower     upper
    #
    if axisMax <= lower and axisMax < peak:
        return []  # No overlap

    # case 2: Only the peak and outermost bound fall outside the new limit;
    # we keep the deltaset, update peak and outermost bound and scale deltas
    # by the scalar value for the restricted axis at the new limit, and solve
    # recursively.
    #
    #                                 |peak
    #  1...............................|.o..........
    #                                  |/ \
    #                                  /   \
    #                                 /|    \
    #                                / |     \
    #  0--------------------------- o  |      o----1
    #                            lower |      upper
    #                                  |
    #                               axisMax
    #
    # Convert to:
    #
    #  1............................................
    #                                  |
    #                                  o peak
    #                                 /|
    #                                /x|
    #  0--------------------------- o  o upper ----1
    #                            lower |
    #                                  |
    #                               axisMax
    if axisMax < peak:
        mult = supportScalar({"tag": axisMax}, {"tag": tent})
        tent = (lower, axisMax, axisMax)
        return [(scalar * mult, t) for scalar, t in _solve(tent, axisLimit)]

    # lower <= axisDef <= peak <= axisMax

    gain = supportScalar({"tag": axisDef}, {"tag": tent})
    out = [(gain, None)]

    # First, the positive side

    # outGain is the scalar of axisMax at the tent.
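    # (Illustrative aside, not in the original: supportScalar evaluates a tent
    # at a given location, e.g. for tent (0, 0.5, 1.0) it returns 0.5 at 0.25,
    # 1.0 at the 0.5 peak, 0.5 at 0.75, and 0 anywhere outside the 0..1.0 span.)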
outGain = supportScalar({"tag": axisMax}, {"tag": tent}) # Case 3a: Gain is more than outGain. The tent down-slope crosses # the axis into negative. We have to split it into multiples. # # | peak | # 1...................|.o.....|.............. # |/x\_ | # gain................+....+_.|.............. # /| |y\| # ................../.|....|..+_......outGain # / | | | \ # 0---|-----------o | | | o----------1 # axisMin lower | | | upper # | | | # axisDef | axisMax # | # crossing if gain >= outGain: # Note that this is the branch taken if both gain and outGain are 0. # Crossing point on the axis. crossing = peak + (1 - gain) * (upper - peak) loc = (max(lower, axisDef), peak, crossing) scalar = 1 # The part before the crossing point. out.append((scalar - gain, loc)) # The part after the crossing point may use one or two tents, # depending on whether upper is before axisMax or not, in one # case we need to keep it down to eternity. # Case 3a1, similar to case 1neg; just one tent needed, as in # the drawing above. if upper >= axisMax: loc = (crossing, axisMax, axisMax) scalar = outGain out.append((scalar - gain, loc)) # Case 3a2: Similar to case 2neg; two tents needed, to keep # down to eternity. # # | peak | # 1...................|.o................|... # |/ \_ | # gain................+....+_............|... # /| | \xxxxxxxxxxy| # / | | \_xxxxxyyyy| # / | | \xxyyyyyy| # 0---|-----------o | | o-------|--1 # axisMin lower | | upper | # | | | # axisDef | axisMax # | # crossing else: # A tent's peak cannot fall on axis default. Nudge it. if upper == axisDef: upper += EPSILON # Downslope. loc1 = (crossing, upper, axisMax) scalar1 = 0 # Eternity justify. loc2 = (upper, axisMax, axisMax) scalar2 = 0 out.append((scalar1 - gain, loc1)) out.append((scalar2 - gain, loc2)) else: # Special-case if peak is at axisMax. if axisMax == peak: upper = peak # Case 3: # We keep delta as is and only scale the axis upper to achieve # the desired new tent if feasible. # # peak # 1.....................o.................... # / \_| # ..................../....+_.........outGain # / | \ # gain..............+......|..+_............. # /| | | \ # 0---|-----------o | | | o----------1 # axisMin lower| | | upper # | | newUpper # axisDef axisMax # newUpper = peak + (1 - gain) * (upper - peak) assert axisMax <= newUpper # Because outGain > gain # Disabled because ots doesn't like us: # https://github.com/fonttools/fonttools/issues/3350 if False and newUpper <= axisDef + (axisMax - axisDef) * 2: upper = newUpper if not negative and axisDef + (axisMax - axisDef) * MAX_F2DOT14 < upper: # we clamp +2.0 to the max F2Dot14 (~1.99994) for convenience upper = axisDef + (axisMax - axisDef) * MAX_F2DOT14 assert peak < upper loc = (max(axisDef, lower), peak, upper) scalar = 1 out.append((scalar - gain, loc)) # Case 4: New limit doesn't fit; we need to chop into two tents, # because the shape of a triangle with part of one side cut off # cannot be represented as a triangle itself. # # | peak | # 1.........|......o.|.................... # ..........|...../x\|.............outGain # | |xxy|\_ # | /xxxy| \_ # | |xxxxy| \_ # | /xxxxy| \_ # 0---|-----|-oxxxxxx| o----------1 # axisMin | lower | upper # | | # axisDef axisMax # else: loc1 = (max(axisDef, lower), peak, axisMax) scalar1 = 1 loc2 = (peak, axisMax, axisMax) scalar2 = outGain out.append((scalar1 - gain, loc1)) # Don't add a dirac delta! if peak < axisMax: out.append((scalar2 - gain, loc2)) # Now, the negative side # Case 1neg: Lower extends beyond axisMin: we chop. Simple. 
# # | |peak # 1..................|...|.o................. # | |/ \ # gain...............|...+...\............... # |x_/| \ # |/ | \ # _/| | \ # 0---------------o | | o----------1 # lower | | upper # | | # axisMin axisDef # if lower <= axisMin: loc = (axisMin, axisMin, axisDef) scalar = supportScalar({"tag": axisMin}, {"tag": tent}) out.append((scalar - gain, loc)) # Case 2neg: Lower is between axisMin and axisDef: we add two # tents to keep it down all the way to eternity. # # | |peak # 1...|...............|.o................. # | |/ \ # gain|...............+...\............... # |yxxxxxxxxxxxxx/| \ # |yyyyyyxxxxxxx/ | \ # |yyyyyyyyyyyx/ | \ # 0---|-----------o | o----------1 # axisMin lower | upper # | # axisDef # else: # A tent's peak cannot fall on axis default. Nudge it. if lower == axisDef: lower -= EPSILON # Downslope. loc1 = (axisMin, lower, axisDef) scalar1 = 0 # Eternity justify. loc2 = (axisMin, axisMin, lower) scalar2 = 0 out.append((scalar1 - gain, loc1)) out.append((scalar2 - gain, loc2)) return out @lru_cache(128) def rebaseTent(tent, axisLimit): """Given a tuple (lower,peak,upper) "tent" and new axis limits (axisMin,axisDefault,axisMax), solves how to represent the tent under the new axis configuration. All values are in normalized -1,0,+1 coordinate system. Tent values can be outside this range. Return value is a list of tuples. Each tuple is of the form (scalar,tent), where scalar is a multiplier to multiply any delta-sets by, and tent is a new tent for that output delta-set. If tent value is None, that is a special deltaset that should be always-enabled (called "gain").""" axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit assert -1 <= axisMin <= axisDef <= axisMax <= +1 lower, peak, upper = tent assert -2 <= lower <= peak <= upper <= +2 assert peak != 0 sols = _solve(tent, axisLimit) n = lambda v: axisLimit.renormalizeValue(v) sols = [ (scalar, (n(v[0]), n(v[1]), n(v[2])) if v is not None else None) for scalar, v in sols if scalar ] return sols PKaZZZ�e���fontTools/voltLib/__init__.py"""fontTools.voltLib -- a package for dealing with Visual OpenType Layout Tool (VOLT) files.""" # See # http://www.microsoft.com/typography/VOLT.mspx PKaZZZ�ޮ.�3�3fontTools/voltLib/ast.pyfrom fontTools.voltLib.error import VoltLibError from typing import NamedTuple class Pos(NamedTuple): adv: int dx: int dy: int adv_adjust_by: dict dx_adjust_by: dict dy_adjust_by: dict def __str__(self): res = " POS" for attr in ("adv", "dx", "dy"): value = getattr(self, attr) if value is not None: res += f" {attr.upper()} {value}" adjust_by = getattr(self, f"{attr}_adjust_by", {}) for size, adjustment in adjust_by.items(): res += f" ADJUST_BY {adjustment} AT {size}" res += " END_POS" return res class Element(object): def __init__(self, location=None): self.location = location def build(self, builder): pass def __str__(self): raise NotImplementedError class Statement(Element): pass class Expression(Element): pass class VoltFile(Statement): def __init__(self): Statement.__init__(self, location=None) self.statements = [] def build(self, builder): for s in self.statements: s.build(builder) def __str__(self): return "\n" + "\n".join(str(s) for s in self.statements) + " END\n" class GlyphDefinition(Statement): def __init__(self, name, gid, gunicode, gtype, components, location=None): Statement.__init__(self, location) self.name = name self.id = gid self.unicode = gunicode self.type = gtype self.components = components def __str__(self): res = f'DEF_GLYPH "{self.name}" ID {self.id}' if
self.unicode is not None: if len(self.unicode) > 1: unicodes = ",".join(f"U+{u:04X}" for u in self.unicode) res += f' UNICODEVALUES "{unicodes}"' else: res += f" UNICODE {self.unicode[0]}" if self.type is not None: res += f" TYPE {self.type}" if self.components is not None: res += f" COMPONENTS {self.components}" res += " END_GLYPH" return res class GroupDefinition(Statement): def __init__(self, name, enum, location=None): Statement.__init__(self, location) self.name = name self.enum = enum self.glyphs_ = None def glyphSet(self, groups=None): if groups is not None and self.name in groups: raise VoltLibError( 'Group "%s" contains itself.' % (self.name), self.location ) if self.glyphs_ is None: if groups is None: groups = set({self.name}) else: groups.add(self.name) self.glyphs_ = self.enum.glyphSet(groups) return self.glyphs_ def __str__(self): enum = self.enum and str(self.enum) or "" return f'DEF_GROUP "{self.name}"\n{enum}\nEND_GROUP' class GlyphName(Expression): """A single glyph name, such as cedilla.""" def __init__(self, glyph, location=None): Expression.__init__(self, location) self.glyph = glyph def glyphSet(self): return (self.glyph,) def __str__(self): return f' GLYPH "{self.glyph}"' class Enum(Expression): """An enum""" def __init__(self, enum, location=None): Expression.__init__(self, location) self.enum = enum def __iter__(self): for e in self.glyphSet(): yield e def glyphSet(self, groups=None): glyphs = [] for element in self.enum: if isinstance(element, (GroupName, Enum)): glyphs.extend(element.glyphSet(groups)) else: glyphs.extend(element.glyphSet()) return tuple(glyphs) def __str__(self): enum = "".join(str(e) for e in self.enum) return f" ENUM{enum} END_ENUM" class GroupName(Expression): """A glyph group""" def __init__(self, group, parser, location=None): Expression.__init__(self, location) self.group = group self.parser_ = parser def glyphSet(self, groups=None): group = self.parser_.resolve_group(self.group) if group is not None: self.glyphs_ = group.glyphSet(groups) return self.glyphs_ else: raise VoltLibError( 'Group "%s" is used but undefined.' 
% (self.group), self.location ) def __str__(self): return f' GROUP "{self.group}"' class Range(Expression): """A glyph range""" def __init__(self, start, end, parser, location=None): Expression.__init__(self, location) self.start = start self.end = end self.parser = parser def glyphSet(self): return tuple(self.parser.glyph_range(self.start, self.end)) def __str__(self): return f' RANGE "{self.start}" TO "{self.end}"' class ScriptDefinition(Statement): def __init__(self, name, tag, langs, location=None): Statement.__init__(self, location) self.name = name self.tag = tag self.langs = langs def __str__(self): res = "DEF_SCRIPT" if self.name is not None: res += f' NAME "{self.name}"' res += f' TAG "{self.tag}"\n\n' for lang in self.langs: res += f"{lang}" res += "END_SCRIPT" return res class LangSysDefinition(Statement): def __init__(self, name, tag, features, location=None): Statement.__init__(self, location) self.name = name self.tag = tag self.features = features def __str__(self): res = "DEF_LANGSYS" if self.name is not None: res += f' NAME "{self.name}"' res += f' TAG "{self.tag}"\n\n' for feature in self.features: res += f"{feature}" res += "END_LANGSYS\n" return res class FeatureDefinition(Statement): def __init__(self, name, tag, lookups, location=None): Statement.__init__(self, location) self.name = name self.tag = tag self.lookups = lookups def __str__(self): res = f'DEF_FEATURE NAME "{self.name}" TAG "{self.tag}"\n' res += " " + " ".join(f'LOOKUP "{l}"' for l in self.lookups) + "\n" res += "END_FEATURE\n" return res class LookupDefinition(Statement): def __init__( self, name, process_base, process_marks, mark_glyph_set, direction, reversal, comments, context, sub, pos, location=None, ): Statement.__init__(self, location) self.name = name self.process_base = process_base self.process_marks = process_marks self.mark_glyph_set = mark_glyph_set self.direction = direction self.reversal = reversal self.comments = comments self.context = context self.sub = sub self.pos = pos def __str__(self): res = f'DEF_LOOKUP "{self.name}"' res += f' {self.process_base and "PROCESS_BASE" or "SKIP_BASE"}' if self.process_marks: res += " PROCESS_MARKS " if self.mark_glyph_set: res += f'MARK_GLYPH_SET "{self.mark_glyph_set}"' elif isinstance(self.process_marks, str): res += f'"{self.process_marks}"' else: res += "ALL" else: res += " SKIP_MARKS" if self.direction is not None: res += f" DIRECTION {self.direction}" if self.reversal: res += " REVERSAL" if self.comments is not None: comments = self.comments.replace("\n", r"\n") res += f'\nCOMMENTS "{comments}"' if self.context: res += "\n" + "\n".join(str(c) for c in self.context) else: res += "\nIN_CONTEXT\nEND_CONTEXT" if self.sub: res += f"\n{self.sub}" if self.pos: res += f"\n{self.pos}" return res class SubstitutionDefinition(Statement): def __init__(self, mapping, location=None): Statement.__init__(self, location) self.mapping = mapping def __str__(self): res = "AS_SUBSTITUTION\n" for src, dst in self.mapping.items(): src = "".join(str(s) for s in src) dst = "".join(str(d) for d in dst) res += f"SUB{src}\nWITH{dst}\nEND_SUB\n" res += "END_SUBSTITUTION" return res class SubstitutionSingleDefinition(SubstitutionDefinition): pass class SubstitutionMultipleDefinition(SubstitutionDefinition): pass class SubstitutionLigatureDefinition(SubstitutionDefinition): pass class SubstitutionReverseChainingSingleDefinition(SubstitutionDefinition): pass class PositionAttachDefinition(Statement): def __init__(self, coverage, coverage_to, location=None): 
Statement.__init__(self, location) self.coverage = coverage self.coverage_to = coverage_to def __str__(self): coverage = "".join(str(c) for c in self.coverage) res = f"AS_POSITION\nATTACH{coverage}\nTO" for coverage, anchor in self.coverage_to: coverage = "".join(str(c) for c in coverage) res += f'{coverage} AT ANCHOR "{anchor}"' res += "\nEND_ATTACH\nEND_POSITION" return res class PositionAttachCursiveDefinition(Statement): def __init__(self, coverages_exit, coverages_enter, location=None): Statement.__init__(self, location) self.coverages_exit = coverages_exit self.coverages_enter = coverages_enter def __str__(self): res = "AS_POSITION\nATTACH_CURSIVE" for coverage in self.coverages_exit: coverage = "".join(str(c) for c in coverage) res += f"\nEXIT {coverage}" for coverage in self.coverages_enter: coverage = "".join(str(c) for c in coverage) res += f"\nENTER {coverage}" res += "\nEND_ATTACH\nEND_POSITION" return res class PositionAdjustPairDefinition(Statement): def __init__(self, coverages_1, coverages_2, adjust_pair, location=None): Statement.__init__(self, location) self.coverages_1 = coverages_1 self.coverages_2 = coverages_2 self.adjust_pair = adjust_pair def __str__(self): res = "AS_POSITION\nADJUST_PAIR\n" for coverage in self.coverages_1: coverage = " ".join(str(c) for c in coverage) res += f" FIRST {coverage}" res += "\n" for coverage in self.coverages_2: coverage = " ".join(str(c) for c in coverage) res += f" SECOND {coverage}" res += "\n" for (id_1, id_2), (pos_1, pos_2) in self.adjust_pair.items(): res += f" {id_1} {id_2} BY{pos_1}{pos_2}\n" res += "\nEND_ADJUST\nEND_POSITION" return res class PositionAdjustSingleDefinition(Statement): def __init__(self, adjust_single, location=None): Statement.__init__(self, location) self.adjust_single = adjust_single def __str__(self): res = "AS_POSITION\nADJUST_SINGLE" for coverage, pos in self.adjust_single: coverage = "".join(str(c) for c in coverage) res += f"{coverage} BY{pos}" res += "\nEND_ADJUST\nEND_POSITION" return res class ContextDefinition(Statement): def __init__(self, ex_or_in, left=None, right=None, location=None): Statement.__init__(self, location) self.ex_or_in = ex_or_in self.left = left if left is not None else [] self.right = right if right is not None else [] def __str__(self): res = self.ex_or_in + "\n" for coverage in self.left: coverage = "".join(str(c) for c in coverage) res += f" LEFT{coverage}\n" for coverage in self.right: coverage = "".join(str(c) for c in coverage) res += f" RIGHT{coverage}\n" res += "END_CONTEXT" return res class AnchorDefinition(Statement): def __init__(self, name, gid, glyph_name, component, locked, pos, location=None): Statement.__init__(self, location) self.name = name self.gid = gid self.glyph_name = glyph_name self.component = component self.locked = locked self.pos = pos def __str__(self): locked = self.locked and " LOCKED" or "" return ( f'DEF_ANCHOR "{self.name}"' f" ON {self.gid}" f" GLYPH {self.glyph_name}" f" COMPONENT {self.component}" f"{locked}" f" AT {self.pos} END_ANCHOR" ) class SettingDefinition(Statement): def __init__(self, name, value, location=None): Statement.__init__(self, location) self.name = name self.value = value def __str__(self): if self.value is True: return f"{self.name}" if isinstance(self.value, (tuple, list)): value = " ".join(str(v) for v in self.value) return f"{self.name} {value}" return f"{self.name} {self.value}" PKaZZZ��U{��fontTools/voltLib/error.pyclass VoltLibError(Exception): def __init__(self, message, location): Exception.__init__(self, 
message) self.location = location def __str__(self): message = Exception.__str__(self) if self.location: path, line, column = self.location return "%s:%d:%d: %s" % (path, line, column, message) else: return message PKaZZZolai( ( fontTools/voltLib/lexer.pyfrom fontTools.voltLib.error import VoltLibError class Lexer(object): NUMBER = "NUMBER" STRING = "STRING" NAME = "NAME" NEWLINE = "NEWLINE" CHAR_WHITESPACE_ = " \t" CHAR_NEWLINE_ = "\r\n" CHAR_DIGIT_ = "0123456789" CHAR_UC_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" CHAR_LC_LETTER_ = "abcdefghijklmnopqrstuvwxyz" CHAR_UNDERSCORE_ = "_" CHAR_PERIOD_ = "." CHAR_NAME_START_ = ( CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + CHAR_UNDERSCORE_ ) CHAR_NAME_CONTINUATION_ = CHAR_NAME_START_ + CHAR_DIGIT_ def __init__(self, text, filename): self.filename_ = filename self.line_ = 1 self.pos_ = 0 self.line_start_ = 0 self.text_ = text self.text_length_ = len(text) def __iter__(self): return self def next(self): # Python 2 return self.__next__() def __next__(self): # Python 3 while True: token_type, token, location = self.next_() if token_type not in {Lexer.NEWLINE}: return (token_type, token, location) def location_(self): column = self.pos_ - self.line_start_ + 1 return (self.filename_ or "<volt>", self.line_, column) def next_(self): self.scan_over_(Lexer.CHAR_WHITESPACE_) location = self.location_() start = self.pos_ text = self.text_ limit = len(text) if start >= limit: raise StopIteration() cur_char = text[start] next_char = text[start + 1] if start + 1 < limit else None if cur_char == "\n": self.pos_ += 1 self.line_ += 1 self.line_start_ = self.pos_ return (Lexer.NEWLINE, None, location) if cur_char == "\r": self.pos_ += 2 if next_char == "\n" else 1 self.line_ += 1 self.line_start_ = self.pos_ return (Lexer.NEWLINE, None, location) if cur_char == '"': self.pos_ += 1 self.scan_until_('"\r\n') if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': self.pos_ += 1 return (Lexer.STRING, text[start + 1 : self.pos_ - 1], location) else: raise VoltLibError("Expected '\"' to terminate string", location) if cur_char in Lexer.CHAR_NAME_START_: self.pos_ += 1 self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) token = text[start : self.pos_] return (Lexer.NAME, token, location) if cur_char in Lexer.CHAR_DIGIT_: self.scan_over_(Lexer.CHAR_DIGIT_) return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: self.pos_ += 1 self.scan_over_(Lexer.CHAR_DIGIT_) return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) raise VoltLibError("Unexpected character: '%s'" % cur_char, location) def scan_over_(self, valid): p = self.pos_ while p < self.text_length_ and self.text_[p] in valid: p += 1 self.pos_ = p def scan_until_(self, stop_at): p = self.pos_ while p < self.text_length_ and self.text_[p] not in stop_at: p += 1 self.pos_ = p PKaZZZ��;�TaTafontTools/voltLib/parser.pyimport fontTools.voltLib.ast as ast from fontTools.voltLib.lexer import Lexer from fontTools.voltLib.error import VoltLibError from io import open PARSE_FUNCS = { "DEF_GLYPH": "parse_def_glyph_", "DEF_GROUP": "parse_def_group_", "DEF_SCRIPT": "parse_def_script_", "DEF_LOOKUP": "parse_def_lookup_", "DEF_ANCHOR": "parse_def_anchor_", "GRID_PPEM": "parse_ppem_", "PRESENTATION_PPEM": "parse_ppem_", "PPOSITIONING_PPEM": "parse_ppem_", "COMPILER_USEEXTENSIONLOOKUPS": "parse_noarg_option_", "COMPILER_USEPAIRPOSFORMAT2": "parse_noarg_option_", "CMAP_FORMAT": "parse_cmap_format", "DO_NOT_TOUCH_CMAP": "parse_noarg_option_", } class 
Parser(object): def __init__(self, path): self.doc_ = ast.VoltFile() self.glyphs_ = OrderedSymbolTable() self.groups_ = SymbolTable() self.anchors_ = {} # dictionary of SymbolTable() keyed by glyph self.scripts_ = SymbolTable() self.langs_ = SymbolTable() self.lookups_ = SymbolTable() self.next_token_type_, self.next_token_ = (None, None) self.next_token_location_ = None self.make_lexer_(path) self.advance_lexer_() def make_lexer_(self, file_or_path): if hasattr(file_or_path, "read"): filename = getattr(file_or_path, "name", None) data = file_or_path.read() else: filename = file_or_path with open(file_or_path, "r") as f: data = f.read() self.lexer_ = Lexer(data, filename) def parse(self): statements = self.doc_.statements while self.next_token_type_ is not None: self.advance_lexer_() if self.cur_token_ in PARSE_FUNCS.keys(): func = getattr(self, PARSE_FUNCS[self.cur_token_]) statements.append(func()) elif self.is_cur_keyword_("END"): break else: raise VoltLibError( "Expected " + ", ".join(sorted(PARSE_FUNCS.keys())), self.cur_token_location_, ) return self.doc_ def parse_def_glyph_(self): assert self.is_cur_keyword_("DEF_GLYPH") location = self.cur_token_location_ name = self.expect_string_() self.expect_keyword_("ID") gid = self.expect_number_() if gid < 0: raise VoltLibError("Invalid glyph ID", self.cur_token_location_) gunicode = None if self.next_token_ == "UNICODE": self.expect_keyword_("UNICODE") gunicode = [self.expect_number_()] if gunicode[0] < 0: raise VoltLibError("Invalid glyph UNICODE", self.cur_token_location_) elif self.next_token_ == "UNICODEVALUES": self.expect_keyword_("UNICODEVALUES") gunicode = self.parse_unicode_values_() gtype = None if self.next_token_ == "TYPE": self.expect_keyword_("TYPE") gtype = self.expect_name_() assert gtype in ("BASE", "LIGATURE", "MARK", "COMPONENT") components = None if self.next_token_ == "COMPONENTS": self.expect_keyword_("COMPONENTS") components = self.expect_number_() self.expect_keyword_("END_GLYPH") if self.glyphs_.resolve(name) is not None: raise VoltLibError( 'Glyph "%s" (gid %i) already defined' % (name, gid), location ) def_glyph = ast.GlyphDefinition( name, gid, gunicode, gtype, components, location=location ) self.glyphs_.define(name, def_glyph) return def_glyph def parse_def_group_(self): assert self.is_cur_keyword_("DEF_GROUP") location = self.cur_token_location_ name = self.expect_string_() enum = None if self.next_token_ == "ENUM": enum = self.parse_enum_() self.expect_keyword_("END_GROUP") if self.groups_.resolve(name) is not None: raise VoltLibError( 'Glyph group "%s" already defined, ' "group names are case insensitive" % name, location, ) def_group = ast.GroupDefinition(name, enum, location=location) self.groups_.define(name, def_group) return def_group def parse_def_script_(self): assert self.is_cur_keyword_("DEF_SCRIPT") location = self.cur_token_location_ name = None if self.next_token_ == "NAME": self.expect_keyword_("NAME") name = self.expect_string_() self.expect_keyword_("TAG") tag = self.expect_string_() if self.scripts_.resolve(tag) is not None: raise VoltLibError( 'Script "%s" already defined, ' "script tags are case insensitive" % tag, location, ) self.langs_.enter_scope() langs = [] while self.next_token_ != "END_SCRIPT": self.advance_lexer_() lang = self.parse_langsys_() self.expect_keyword_("END_LANGSYS") if self.langs_.resolve(lang.tag) is not None: raise VoltLibError( 'Language "%s" already defined in script "%s", ' "language tags are case insensitive" % (lang.tag, tag), location, ) 
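# For orientation, a hand-written sketch of the VOLT source consumed here # (script/language/feature names and tags invented for illustration): # DEF_SCRIPT NAME "Latin" TAG "latn" # DEF_LANGSYS NAME "Default" TAG "dflt" # DEF_FEATURE NAME "Ligatures" TAG "liga" # LOOKUP "liga1" # END_FEATURE # END_LANGSYS # END_SCRIPT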
self.langs_.define(lang.tag, lang) langs.append(lang) self.expect_keyword_("END_SCRIPT") self.langs_.exit_scope() def_script = ast.ScriptDefinition(name, tag, langs, location=location) self.scripts_.define(tag, def_script) return def_script def parse_langsys_(self): assert self.is_cur_keyword_("DEF_LANGSYS") location = self.cur_token_location_ name = None if self.next_token_ == "NAME": self.expect_keyword_("NAME") name = self.expect_string_() self.expect_keyword_("TAG") tag = self.expect_string_() features = [] while self.next_token_ != "END_LANGSYS": self.advance_lexer_() feature = self.parse_feature_() self.expect_keyword_("END_FEATURE") features.append(feature) def_langsys = ast.LangSysDefinition(name, tag, features, location=location) return def_langsys def parse_feature_(self): assert self.is_cur_keyword_("DEF_FEATURE") location = self.cur_token_location_ self.expect_keyword_("NAME") name = self.expect_string_() self.expect_keyword_("TAG") tag = self.expect_string_() lookups = [] while self.next_token_ != "END_FEATURE": # self.advance_lexer_() self.expect_keyword_("LOOKUP") lookup = self.expect_string_() lookups.append(lookup) feature = ast.FeatureDefinition(name, tag, lookups, location=location) return feature def parse_def_lookup_(self): assert self.is_cur_keyword_("DEF_LOOKUP") location = self.cur_token_location_ name = self.expect_string_() if not name[0].isalpha(): raise VoltLibError( 'Lookup name "%s" must start with a letter' % name, location ) if self.lookups_.resolve(name) is not None: raise VoltLibError( 'Lookup "%s" already defined, ' "lookup names are case insensitive" % name, location, ) process_base = True if self.next_token_ == "PROCESS_BASE": self.advance_lexer_() elif self.next_token_ == "SKIP_BASE": self.advance_lexer_() process_base = False process_marks = True mark_glyph_set = None if self.next_token_ == "PROCESS_MARKS": self.advance_lexer_() if self.next_token_ == "MARK_GLYPH_SET": self.advance_lexer_() mark_glyph_set = self.expect_string_() elif self.next_token_ == "ALL": self.advance_lexer_() elif self.next_token_ == "NONE": self.advance_lexer_() process_marks = False elif self.next_token_type_ == Lexer.STRING: process_marks = self.expect_string_() else: raise VoltLibError( "Expected ALL, NONE, MARK_GLYPH_SET or an ID. " "Got %s" % (self.next_token_type_), location, ) elif self.next_token_ == "SKIP_MARKS": self.advance_lexer_() process_marks = False direction = None if self.next_token_ == "DIRECTION": self.expect_keyword_("DIRECTION") direction = self.expect_name_() assert direction in ("LTR", "RTL") reversal = None if self.next_token_ == "REVERSAL": self.expect_keyword_("REVERSAL") reversal = True comments = None if self.next_token_ == "COMMENTS": self.expect_keyword_("COMMENTS") comments = self.expect_string_().replace(r"\n", "\n") context = [] while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"): context = self.parse_context_() as_pos_or_sub = self.expect_name_() sub = None pos = None if as_pos_or_sub == "AS_SUBSTITUTION": sub = self.parse_substitution_(reversal) elif as_pos_or_sub == "AS_POSITION": pos = self.parse_position_() else: raise VoltLibError( "Expected AS_SUBSTITUTION or AS_POSITION. 
" "Got %s" % (as_pos_or_sub), location, ) def_lookup = ast.LookupDefinition( name, process_base, process_marks, mark_glyph_set, direction, reversal, comments, context, sub, pos, location=location, ) self.lookups_.define(name, def_lookup) return def_lookup def parse_context_(self): location = self.cur_token_location_ contexts = [] while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"): side = None coverage = None ex_or_in = self.expect_name_() # side_contexts = [] # XXX if self.next_token_ != "END_CONTEXT": left = [] right = [] while self.next_token_ in ("LEFT", "RIGHT"): side = self.expect_name_() coverage = self.parse_coverage_() if side == "LEFT": left.append(coverage) else: right.append(coverage) self.expect_keyword_("END_CONTEXT") context = ast.ContextDefinition( ex_or_in, left, right, location=location ) contexts.append(context) else: self.expect_keyword_("END_CONTEXT") return contexts def parse_substitution_(self, reversal): assert self.is_cur_keyword_("AS_SUBSTITUTION") location = self.cur_token_location_ src = [] dest = [] if self.next_token_ != "SUB": raise VoltLibError("Expected SUB", location) while self.next_token_ == "SUB": self.expect_keyword_("SUB") src.append(self.parse_coverage_()) self.expect_keyword_("WITH") dest.append(self.parse_coverage_()) self.expect_keyword_("END_SUB") self.expect_keyword_("END_SUBSTITUTION") max_src = max([len(cov) for cov in src]) max_dest = max([len(cov) for cov in dest]) # many to many or mixed is invalid if (max_src > 1 and max_dest > 1) or ( reversal and (max_src > 1 or max_dest > 1) ): raise VoltLibError("Invalid substitution type", location) mapping = dict(zip(tuple(src), tuple(dest))) if max_src == 1 and max_dest == 1: if reversal: sub = ast.SubstitutionReverseChainingSingleDefinition( mapping, location=location ) else: sub = ast.SubstitutionSingleDefinition(mapping, location=location) elif max_src == 1 and max_dest > 1: sub = ast.SubstitutionMultipleDefinition(mapping, location=location) elif max_src > 1 and max_dest == 1: sub = ast.SubstitutionLigatureDefinition(mapping, location=location) return sub def parse_position_(self): assert self.is_cur_keyword_("AS_POSITION") location = self.cur_token_location_ pos_type = self.expect_name_() if pos_type not in ("ATTACH", "ATTACH_CURSIVE", "ADJUST_PAIR", "ADJUST_SINGLE"): raise VoltLibError( "Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE", location ) if pos_type == "ATTACH": position = self.parse_attach_() elif pos_type == "ATTACH_CURSIVE": position = self.parse_attach_cursive_() elif pos_type == "ADJUST_PAIR": position = self.parse_adjust_pair_() elif pos_type == "ADJUST_SINGLE": position = self.parse_adjust_single_() self.expect_keyword_("END_POSITION") return position def parse_attach_(self): assert self.is_cur_keyword_("ATTACH") location = self.cur_token_location_ coverage = self.parse_coverage_() coverage_to = [] self.expect_keyword_("TO") while self.next_token_ != "END_ATTACH": cov = self.parse_coverage_() self.expect_keyword_("AT") self.expect_keyword_("ANCHOR") anchor_name = self.expect_string_() coverage_to.append((cov, anchor_name)) self.expect_keyword_("END_ATTACH") position = ast.PositionAttachDefinition( coverage, coverage_to, location=location ) return position def parse_attach_cursive_(self): assert self.is_cur_keyword_("ATTACH_CURSIVE") location = self.cur_token_location_ coverages_exit = [] coverages_enter = [] while self.next_token_ != "ENTER": self.expect_keyword_("EXIT") coverages_exit.append(self.parse_coverage_()) while self.next_token_ != "END_ATTACH": 
self.expect_keyword_("ENTER") coverages_enter.append(self.parse_coverage_()) self.expect_keyword_("END_ATTACH") position = ast.PositionAttachCursiveDefinition( coverages_exit, coverages_enter, location=location ) return position def parse_adjust_pair_(self): assert self.is_cur_keyword_("ADJUST_PAIR") location = self.cur_token_location_ coverages_1 = [] coverages_2 = [] adjust_pair = {} while self.next_token_ == "FIRST": self.advance_lexer_() coverage_1 = self.parse_coverage_() coverages_1.append(coverage_1) while self.next_token_ == "SECOND": self.advance_lexer_() coverage_2 = self.parse_coverage_() coverages_2.append(coverage_2) while self.next_token_ != "END_ADJUST": id_1 = self.expect_number_() id_2 = self.expect_number_() self.expect_keyword_("BY") pos_1 = self.parse_pos_() pos_2 = self.parse_pos_() adjust_pair[(id_1, id_2)] = (pos_1, pos_2) self.expect_keyword_("END_ADJUST") position = ast.PositionAdjustPairDefinition( coverages_1, coverages_2, adjust_pair, location=location ) return position def parse_adjust_single_(self): assert self.is_cur_keyword_("ADJUST_SINGLE") location = self.cur_token_location_ adjust_single = [] while self.next_token_ != "END_ADJUST": coverages = self.parse_coverage_() self.expect_keyword_("BY") pos = self.parse_pos_() adjust_single.append((coverages, pos)) self.expect_keyword_("END_ADJUST") position = ast.PositionAdjustSingleDefinition(adjust_single, location=location) return position def parse_def_anchor_(self): assert self.is_cur_keyword_("DEF_ANCHOR") location = self.cur_token_location_ name = self.expect_string_() self.expect_keyword_("ON") gid = self.expect_number_() self.expect_keyword_("GLYPH") glyph_name = self.expect_name_() self.expect_keyword_("COMPONENT") component = self.expect_number_() # check for duplicate anchor names on this glyph if glyph_name in self.anchors_: anchor = self.anchors_[glyph_name].resolve(name) if anchor is not None and anchor.component == component: raise VoltLibError( 'Anchor "%s" already defined, ' "anchor names are case insensitive" % name, location, ) if self.next_token_ == "LOCKED": locked = True self.advance_lexer_() else: locked = False self.expect_keyword_("AT") pos = self.parse_pos_() self.expect_keyword_("END_ANCHOR") anchor = ast.AnchorDefinition( name, gid, glyph_name, component, locked, pos, location=location ) if glyph_name not in self.anchors_: self.anchors_[glyph_name] = SymbolTable() self.anchors_[glyph_name].define(name, anchor) return anchor def parse_adjust_by_(self): self.advance_lexer_() assert self.is_cur_keyword_("ADJUST_BY") adjustment = self.expect_number_() self.expect_keyword_("AT") size = self.expect_number_() return adjustment, size def parse_pos_(self): # VOLT syntax doesn't seem to take device Y advance self.advance_lexer_() location = self.cur_token_location_ assert self.is_cur_keyword_("POS"), location adv = None dx = None dy = None adv_adjust_by = {} dx_adjust_by = {} dy_adjust_by = {} if self.next_token_ == "ADV": self.advance_lexer_() adv = self.expect_number_() while self.next_token_ == "ADJUST_BY": adjustment, size = self.parse_adjust_by_() adv_adjust_by[size] = adjustment if self.next_token_ == "DX": self.advance_lexer_() dx = self.expect_number_() while self.next_token_ == "ADJUST_BY": adjustment, size = self.parse_adjust_by_() dx_adjust_by[size] = adjustment if self.next_token_ == "DY": self.advance_lexer_() dy = self.expect_number_() while self.next_token_ == "ADJUST_BY": adjustment, size = self.parse_adjust_by_() dy_adjust_by[size] = adjustment self.expect_keyword_("END_POS") 
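# Illustrative input/output pair for the POS grammar handled above (numbers # invented): the record # POS ADV 100 ADJUST_BY 5 AT 12 DX -20 END_POS # parses to Pos(adv=100, dx=-20, dy=None, adv_adjust_by={12: 5}, # dx_adjust_by={}, dy_adjust_by={}).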
return ast.Pos(adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by) def parse_unicode_values_(self): location = self.cur_token_location_ try: unicode_values = self.expect_string_().split(",") unicode_values = [int(uni[2:], 16) for uni in unicode_values if uni != ""] except ValueError as err: raise VoltLibError(str(err), location) return unicode_values if unicode_values != [] else None def parse_enum_(self): self.expect_keyword_("ENUM") location = self.cur_token_location_ enum = ast.Enum(self.parse_coverage_(), location=location) self.expect_keyword_("END_ENUM") return enum def parse_coverage_(self): coverage = [] location = self.cur_token_location_ while self.next_token_ in ("GLYPH", "GROUP", "RANGE", "ENUM"): if self.next_token_ == "ENUM": enum = self.parse_enum_() coverage.append(enum) elif self.next_token_ == "GLYPH": self.expect_keyword_("GLYPH") name = self.expect_string_() coverage.append(ast.GlyphName(name, location=location)) elif self.next_token_ == "GROUP": self.expect_keyword_("GROUP") name = self.expect_string_() coverage.append(ast.GroupName(name, self, location=location)) elif self.next_token_ == "RANGE": self.expect_keyword_("RANGE") start = self.expect_string_() self.expect_keyword_("TO") end = self.expect_string_() coverage.append(ast.Range(start, end, self, location=location)) return tuple(coverage) def resolve_group(self, group_name): return self.groups_.resolve(group_name) def glyph_range(self, start, end): return self.glyphs_.range(start, end) def parse_ppem_(self): location = self.cur_token_location_ ppem_name = self.cur_token_ value = self.expect_number_() setting = ast.SettingDefinition(ppem_name, value, location=location) return setting def parse_noarg_option_(self): location = self.cur_token_location_ name = self.cur_token_ value = True setting = ast.SettingDefinition(name, value, location=location) return setting def parse_cmap_format(self): location = self.cur_token_location_ name = self.cur_token_ value = (self.expect_number_(), self.expect_number_(), self.expect_number_()) setting = ast.SettingDefinition(name, value, location=location) return setting def is_cur_keyword_(self, k): return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) def expect_string_(self): self.advance_lexer_() if self.cur_token_type_ is not Lexer.STRING: raise VoltLibError("Expected a string", self.cur_token_location_) return self.cur_token_ def expect_keyword_(self, keyword): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: return self.cur_token_ raise VoltLibError('Expected "%s"' % keyword, self.cur_token_location_) def expect_name_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME: return self.cur_token_ raise VoltLibError("Expected a name", self.cur_token_location_) def expect_number_(self): self.advance_lexer_() if self.cur_token_type_ is not Lexer.NUMBER: raise VoltLibError("Expected a number", self.cur_token_location_) return self.cur_token_ def advance_lexer_(self): self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( self.next_token_type_, self.next_token_, self.next_token_location_, ) try: if self.is_cur_keyword_("END"): raise StopIteration ( self.next_token_type_, self.next_token_, self.next_token_location_, ) = self.lexer_.next() except StopIteration: self.next_token_type_, self.next_token_ = (None, None) class SymbolTable(object): def __init__(self): self.scopes_ = [{}] def enter_scope(self): self.scopes_.append({}) def exit_scope(self): self.scopes_.pop() def define(self, name, 
item): self.scopes_[-1][name] = item def resolve(self, name, case_insensitive=True): for scope in reversed(self.scopes_): item = scope.get(name) if item: return item if case_insensitive: for key in scope: if key.lower() == name.lower(): return scope[key] return None class OrderedSymbolTable(SymbolTable): def __init__(self): self.scopes_ = [{}] def enter_scope(self): self.scopes_.append({}) def resolve(self, name, case_insensitive=False): return SymbolTable.resolve(self, name, case_insensitive=case_insensitive) def range(self, start, end): for scope in reversed(self.scopes_): if start in scope and end in scope: start_idx = list(scope.keys()).index(start) end_idx = list(scope.keys()).index(end) return list(scope.keys())[start_idx : end_idx + 1] return None PKaZZZ�y5R1o1ofontTools/voltLib/voltToFea.py"""\ MS VOLT ``.vtp`` to AFDKO ``.fea`` OpenType Layout converter. Usage ----- To convert a VTP project file: $ fonttools voltLib.voltToFea input.vtp output.fea It is also possible to convert font files with a `TSIV` table (as saved from Volt); in this case the glyph names used in the Volt project will be mapped to the actual glyph names in the font files when written to the feature file: $ fonttools voltLib.voltToFea input.ttf output.fea The ``--quiet`` option can be used to suppress warnings. The ``--traceback`` option can be used to get a Python traceback in case of exceptions, instead of suppressing the traceback. Limitations ----------- * Not all VOLT features are supported; the script will error if it encounters something it does not understand. Please report an issue if this happens. * AFDKO feature file syntax for mark positioning is awkward and does not allow setting the mark coverage. It also defines mark anchors globally; as a result some mark positioning lookups might cover more marks than were in the VOLT file. This should not be an issue in practice, but if it is then the only way is to modify the VOLT file or the generated feature file manually to use unique mark anchors for each lookup. * VOLT allows subtable breaks in any lookup type, but AFDKO feature file implementations vary in their support; currently AFDKO’s makeOTF supports subtable breaks in pair positioning lookups only, while FontTools’ feaLib supports it for most substitution lookups and only some positioning lookups. """ import logging import re from io import StringIO from fontTools.feaLib import ast from fontTools.ttLib import TTFont, TTLibError from fontTools.voltLib import ast as VAst from fontTools.voltLib.parser import Parser as VoltParser log = logging.getLogger("fontTools.voltLib.voltToFea") TABLES = ["GDEF", "GSUB", "GPOS"] class MarkClassDefinition(ast.MarkClassDefinition): def asFea(self, indent=""): res = "" if not getattr(self, "used", False): res += "#" res += ast.MarkClassDefinition.asFea(self, indent) return res # For sorting voltLib.ast.GroupDefinition, see its use below.
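# For example (group names invented for illustration): if DEF_GROUP # "all_marks" enumerates GROUP "marks_above" and GROUP "marks_below", sorting # with this key emits both referenced groups before "all_marks" itself.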
class Group: def __init__(self, group): self.name = group.name.lower() self.groups = [ x.group.lower() for x in group.enum.enum if isinstance(x, VAst.GroupName) ] def __lt__(self, other): if self.name in other.groups: return True if other.name in self.groups: return False if self.groups and not other.groups: return False if not self.groups and other.groups: return True class VoltToFea: _NOT_LOOKUP_NAME_RE = re.compile(r"[^A-Za-z_0-9.]") _NOT_CLASS_NAME_RE = re.compile(r"[^A-Za-z_0-9.\-]") def __init__(self, file_or_path, font=None): self._file_or_path = file_or_path self._font = font self._glyph_map = {} self._glyph_order = None self._gdef = {} self._glyphclasses = {} self._features = {} self._lookups = {} self._marks = set() self._ligatures = {} self._markclasses = {} self._anchors = {} self._settings = {} self._lookup_names = {} self._class_names = {} def _lookupName(self, name): if name not in self._lookup_names: res = self._NOT_LOOKUP_NAME_RE.sub("_", name) while res in self._lookup_names.values(): res += "_" self._lookup_names[name] = res return self._lookup_names[name] def _className(self, name): if name not in self._class_names: res = self._NOT_CLASS_NAME_RE.sub("_", name) while res in self._class_names.values(): res += "_" self._class_names[name] = res return self._class_names[name] def _collectStatements(self, doc, tables): # Collect and sort group definitions first, to make sure a group # definition that references other groups comes after them since VOLT # does not enforce such ordering, and feature file require it. groups = [s for s in doc.statements if isinstance(s, VAst.GroupDefinition)] for statement in sorted(groups, key=lambda x: Group(x)): self._groupDefinition(statement) for statement in doc.statements: if isinstance(statement, VAst.GlyphDefinition): self._glyphDefinition(statement) elif isinstance(statement, VAst.AnchorDefinition): if "GPOS" in tables: self._anchorDefinition(statement) elif isinstance(statement, VAst.SettingDefinition): self._settingDefinition(statement) elif isinstance(statement, VAst.GroupDefinition): pass # Handled above elif isinstance(statement, VAst.ScriptDefinition): self._scriptDefinition(statement) elif not isinstance(statement, VAst.LookupDefinition): raise NotImplementedError(statement) # Lookup definitions need to be handled last as they reference glyph # and mark classes that might be defined after them. 
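# In other words, this method is deliberately a two-pass walk over # doc.statements: the passes above register groups, glyphs, anchors and # settings, so that the pass below can resolve any of them from a lookup.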
for statement in doc.statements: if isinstance(statement, VAst.LookupDefinition): if statement.pos and "GPOS" not in tables: continue if statement.sub and "GSUB" not in tables: continue self._lookupDefinition(statement) def _buildFeatureFile(self, tables): doc = ast.FeatureFile() statements = doc.statements if self._glyphclasses: statements.append(ast.Comment("# Glyph classes")) statements.extend(self._glyphclasses.values()) if self._markclasses: statements.append(ast.Comment("\n# Mark classes")) statements.extend(c[1] for c in sorted(self._markclasses.items())) if self._lookups: statements.append(ast.Comment("\n# Lookups")) for lookup in self._lookups.values(): statements.extend(getattr(lookup, "targets", [])) statements.append(lookup) # Prune features features = self._features.copy() for ftag in features: scripts = features[ftag] for stag in scripts: langs = scripts[stag] for ltag in langs: langs[ltag] = [l for l in langs[ltag] if l.lower() in self._lookups] scripts[stag] = {t: l for t, l in langs.items() if l} features[ftag] = {t: s for t, s in scripts.items() if s} features = {t: f for t, f in features.items() if f} if features: statements.append(ast.Comment("# Features")) for ftag, scripts in features.items(): feature = ast.FeatureBlock(ftag) stags = sorted(scripts, key=lambda k: 0 if k == "DFLT" else 1) for stag in stags: feature.statements.append(ast.ScriptStatement(stag)) ltags = sorted(scripts[stag], key=lambda k: 0 if k == "dflt" else 1) for ltag in ltags: include_default = True if ltag == "dflt" else False feature.statements.append( ast.LanguageStatement(ltag, include_default=include_default) ) for name in scripts[stag][ltag]: lookup = self._lookups[name.lower()] lookupref = ast.LookupReferenceStatement(lookup) feature.statements.append(lookupref) statements.append(feature) if self._gdef and "GDEF" in tables: classes = [] for name in ("BASE", "MARK", "LIGATURE", "COMPONENT"): if name in self._gdef: classname = "GDEF_" + name.lower() glyphclass = ast.GlyphClassDefinition(classname, self._gdef[name]) statements.append(glyphclass) classes.append(ast.GlyphClassName(glyphclass)) else: classes.append(None) gdef = ast.TableBlock("GDEF") gdef.statements.append(ast.GlyphClassDefStatement(*classes)) statements.append(gdef) return doc def convert(self, tables=None): doc = VoltParser(self._file_or_path).parse() if tables is None: tables = TABLES if self._font is not None: self._glyph_order = self._font.getGlyphOrder() self._collectStatements(doc, tables) fea = self._buildFeatureFile(tables) return fea.asFea() def _glyphName(self, glyph): try: name = glyph.glyph except AttributeError: name = glyph return ast.GlyphName(self._glyph_map.get(name, name)) def _groupName(self, group): try: name = group.group except AttributeError: name = group return ast.GlyphClassName(self._glyphclasses[name.lower()]) def _coverage(self, coverage): items = [] for item in coverage: if isinstance(item, VAst.GlyphName): items.append(self._glyphName(item)) elif isinstance(item, VAst.GroupName): items.append(self._groupName(item)) elif isinstance(item, VAst.Enum): items.append(self._enum(item)) elif isinstance(item, VAst.Range): items.append((item.start, item.end)) else: raise NotImplementedError(item) return items def _enum(self, enum): return ast.GlyphClass(self._coverage(enum.enum)) def _context(self, context): out = [] for item in context: coverage = self._coverage(item) if not isinstance(coverage, (tuple, list)): coverage = [coverage] out.extend(coverage) return out def _groupDefinition(self, group): name = 
self._className(group.name) glyphs = self._enum(group.enum) glyphclass = ast.GlyphClassDefinition(name, glyphs) self._glyphclasses[group.name.lower()] = glyphclass def _glyphDefinition(self, glyph): try: self._glyph_map[glyph.name] = self._glyph_order[glyph.id] except TypeError: pass if glyph.type in ("BASE", "MARK", "LIGATURE", "COMPONENT"): if glyph.type not in self._gdef: self._gdef[glyph.type] = ast.GlyphClass() self._gdef[glyph.type].glyphs.append(self._glyphName(glyph.name)) if glyph.type == "MARK": self._marks.add(glyph.name) elif glyph.type == "LIGATURE": self._ligatures[glyph.name] = glyph.components def _scriptDefinition(self, script): stag = script.tag for lang in script.langs: ltag = lang.tag for feature in lang.features: lookups = {l.split("\\")[0]: True for l in feature.lookups} ftag = feature.tag if ftag not in self._features: self._features[ftag] = {} if stag not in self._features[ftag]: self._features[ftag][stag] = {} assert ltag not in self._features[ftag][stag] self._features[ftag][stag][ltag] = lookups.keys() def _settingDefinition(self, setting): if setting.name.startswith("COMPILER_"): self._settings[setting.name] = setting.value else: log.warning(f"Unsupported setting ignored: {setting.name}") def _adjustment(self, adjustment): adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment adv_device = adv_adjust_by and adv_adjust_by.items() or None dx_device = dx_adjust_by and dx_adjust_by.items() or None dy_device = dy_adjust_by and dy_adjust_by.items() or None return ast.ValueRecord( xPlacement=dx, yPlacement=dy, xAdvance=adv, xPlaDevice=dx_device, yPlaDevice=dy_device, xAdvDevice=adv_device, ) def _anchor(self, adjustment): adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment assert not adv_adjust_by dx_device = dx_adjust_by and dx_adjust_by.items() or None dy_device = dy_adjust_by and dy_adjust_by.items() or None return ast.Anchor( dx or 0, dy or 0, xDeviceTable=dx_device or None, yDeviceTable=dy_device or None, ) def _anchorDefinition(self, anchordef): anchorname = anchordef.name glyphname = anchordef.glyph_name anchor = self._anchor(anchordef.pos) if anchorname.startswith("MARK_"): name = "_".join(anchorname.split("_")[1:]) markclass = ast.MarkClass(self._className(name)) glyph = self._glyphName(glyphname) markdef = MarkClassDefinition(markclass, anchor, glyph) self._markclasses[(glyphname, anchorname)] = markdef else: if glyphname not in self._anchors: self._anchors[glyphname] = {} if anchorname not in self._anchors[glyphname]: self._anchors[glyphname][anchorname] = {} self._anchors[glyphname][anchorname][anchordef.component] = anchor def _gposLookup(self, lookup, fealookup): statements = fealookup.statements pos = lookup.pos if isinstance(pos, VAst.PositionAdjustPairDefinition): for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items(): coverage_1 = pos.coverages_1[idx1 - 1] coverage_2 = pos.coverages_2[idx2 - 1] # If not both are groups, use “enum pos” otherwise makeotf will # fail. 
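# A feature-file sketch of the distinction (class and glyph names invented): # a pair of groups can compile as "pos @A @V -40;", whereas a mixed pair such # as "enum pos @A v -40;" must be enumerated into per-glyph pairs.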
enumerated = False for item in coverage_1 + coverage_2: if not isinstance(item, VAst.GroupName): enumerated = True glyphs1 = self._coverage(coverage_1) glyphs2 = self._coverage(coverage_2) record1 = self._adjustment(pos1) record2 = self._adjustment(pos2) assert len(glyphs1) == 1 assert len(glyphs2) == 1 statements.append( ast.PairPosStatement( glyphs1[0], record1, glyphs2[0], record2, enumerated=enumerated ) ) elif isinstance(pos, VAst.PositionAdjustSingleDefinition): for a, b in pos.adjust_single: glyphs = self._coverage(a) record = self._adjustment(b) assert len(glyphs) == 1 statements.append( ast.SinglePosStatement([(glyphs[0], record)], [], [], False) ) elif isinstance(pos, VAst.PositionAttachDefinition): anchors = {} for marks, classname in pos.coverage_to: for mark in marks: # Set actually used mark classes. Basically a hack to get # around the feature file syntax limitation of making mark # classes global and not allowing mark positioning to # specify mark coverage. for name in mark.glyphSet(): key = (name, "MARK_" + classname) self._markclasses[key].used = True markclass = ast.MarkClass(self._className(classname)) for base in pos.coverage: for name in base.glyphSet(): if name not in anchors: anchors[name] = [] if classname not in anchors[name]: anchors[name].append(classname) for name in anchors: components = 1 if name in self._ligatures: components = self._ligatures[name] marks = [] for mark in anchors[name]: markclass = ast.MarkClass(self._className(mark)) for component in range(1, components + 1): if len(marks) < component: marks.append([]) anchor = None if component in self._anchors[name][mark]: anchor = self._anchors[name][mark][component] marks[component - 1].append((anchor, markclass)) base = self._glyphName(name) if name in self._marks: mark = ast.MarkMarkPosStatement(base, marks[0]) elif name in self._ligatures: mark = ast.MarkLigPosStatement(base, marks) else: mark = ast.MarkBasePosStatement(base, marks[0]) statements.append(mark) elif isinstance(pos, VAst.PositionAttachCursiveDefinition): # Collect enter and exit glyphs enter_coverage = [] for coverage in pos.coverages_enter: for base in coverage: for name in base.glyphSet(): enter_coverage.append(name) exit_coverage = [] for coverage in pos.coverages_exit: for base in coverage: for name in base.glyphSet(): exit_coverage.append(name) # Write enter anchors, also check if the glyph has exit anchor and # write it, too. for name in enter_coverage: glyph = self._glyphName(name) entry = self._anchors[name]["entry"][1] exit = None if name in exit_coverage: exit = self._anchors[name]["exit"][1] exit_coverage.pop(exit_coverage.index(name)) statements.append(ast.CursivePosStatement(glyph, entry, exit)) # Write any remaining exit anchors. 
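# (These are glyphs that only ever act as cursive exits; feaLib renders the # missing entry anchor as <anchor NULL> in the generated statement.)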
for name in exit_coverage: glyph = self._glyphName(name) exit = self._anchors[name]["exit"][1] statements.append(ast.CursivePosStatement(glyph, None, exit)) else: raise NotImplementedError(pos) def _gposContextLookup( self, lookup, prefix, suffix, ignore, fealookup, targetlookup ): statements = fealookup.statements assert not lookup.reversal pos = lookup.pos if isinstance(pos, VAst.PositionAdjustPairDefinition): for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items(): glyphs1 = self._coverage(pos.coverages_1[idx1 - 1]) glyphs2 = self._coverage(pos.coverages_2[idx2 - 1]) assert len(glyphs1) == 1 assert len(glyphs2) == 1 glyphs = (glyphs1[0], glyphs2[0]) if ignore: statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)]) else: lookups = (targetlookup, targetlookup) statement = ast.ChainContextPosStatement( prefix, glyphs, suffix, lookups ) statements.append(statement) elif isinstance(pos, VAst.PositionAdjustSingleDefinition): glyphs = [ast.GlyphClass()] for a, b in pos.adjust_single: glyph = self._coverage(a) glyphs[0].extend(glyph) if ignore: statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)]) else: statement = ast.ChainContextPosStatement( prefix, glyphs, suffix, [targetlookup] ) statements.append(statement) elif isinstance(pos, VAst.PositionAttachDefinition): glyphs = [ast.GlyphClass()] for coverage, _ in pos.coverage_to: glyphs[0].extend(self._coverage(coverage)) if ignore: statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)]) else: statement = ast.ChainContextPosStatement( prefix, glyphs, suffix, [targetlookup] ) statements.append(statement) else: raise NotImplementedError(pos) def _gsubLookup(self, lookup, prefix, suffix, ignore, chain, fealookup): statements = fealookup.statements sub = lookup.sub for key, val in sub.mapping.items(): if not key or not val: path, line, column = sub.location log.warning(f"{path}:{line}:{column}: Ignoring empty substitution") continue statement = None glyphs = self._coverage(key) replacements = self._coverage(val) if ignore: chain_context = (prefix, glyphs, suffix) statement = ast.IgnoreSubstStatement([chain_context]) elif isinstance(sub, VAst.SubstitutionSingleDefinition): assert len(glyphs) == 1 assert len(replacements) == 1 statement = ast.SingleSubstStatement( glyphs, replacements, prefix, suffix, chain ) elif isinstance(sub, VAst.SubstitutionReverseChainingSingleDefinition): assert len(glyphs) == 1 assert len(replacements) == 1 statement = ast.ReverseChainSingleSubstStatement( prefix, suffix, glyphs, replacements ) elif isinstance(sub, VAst.SubstitutionMultipleDefinition): assert len(glyphs) == 1 statement = ast.MultipleSubstStatement( prefix, glyphs[0], suffix, replacements, chain ) elif isinstance(sub, VAst.SubstitutionLigatureDefinition): assert len(replacements) == 1 statement = ast.LigatureSubstStatement( prefix, glyphs, suffix, replacements[0], chain ) else: raise NotImplementedError(sub) statements.append(statement) def _lookupDefinition(self, lookup): mark_attachement = None mark_filtering = None flags = 0 if lookup.direction == "RTL": flags |= 1 if not lookup.process_base: flags |= 2 # FIXME: Does VOLT support this? 
# if not lookup.process_ligatures: # flags |= 4 if not lookup.process_marks: flags |= 8 elif isinstance(lookup.process_marks, str): mark_attachement = self._groupName(lookup.process_marks) elif lookup.mark_glyph_set is not None: mark_filtering = self._groupName(lookup.mark_glyph_set) lookupflags = None if flags or mark_attachement is not None or mark_filtering is not None: lookupflags = ast.LookupFlagStatement( flags, mark_attachement, mark_filtering ) if "\\" in lookup.name: # Merge sub lookups as subtables (lookups named “base\sub”), # makeotf/feaLib will issue a warning and ignore the subtable # statement if it is not a pairpos lookup, though. name = lookup.name.split("\\")[0] if name.lower() not in self._lookups: fealookup = ast.LookupBlock(self._lookupName(name)) if lookupflags is not None: fealookup.statements.append(lookupflags) fealookup.statements.append(ast.Comment("# " + lookup.name)) else: fealookup = self._lookups[name.lower()] fealookup.statements.append(ast.SubtableStatement()) fealookup.statements.append(ast.Comment("# " + lookup.name)) self._lookups[name.lower()] = fealookup else: fealookup = ast.LookupBlock(self._lookupName(lookup.name)) if lookupflags is not None: fealookup.statements.append(lookupflags) self._lookups[lookup.name.lower()] = fealookup if lookup.comments is not None: fealookup.statements.append(ast.Comment("# " + lookup.comments)) contexts = [] if lookup.context: for context in lookup.context: prefix = self._context(context.left) suffix = self._context(context.right) ignore = context.ex_or_in == "EXCEPT_CONTEXT" contexts.append([prefix, suffix, ignore, False]) # It seems that VOLT will create contextual substitution using # only the input if there is no other contexts in this lookup. if ignore and len(lookup.context) == 1: contexts.append([[], [], False, True]) else: contexts.append([[], [], False, False]) targetlookup = None for prefix, suffix, ignore, chain in contexts: if lookup.sub is not None: self._gsubLookup(lookup, prefix, suffix, ignore, chain, fealookup) if lookup.pos is not None: if self._settings.get("COMPILER_USEEXTENSIONLOOKUPS"): fealookup.use_extension = True if prefix or suffix or chain or ignore: if not ignore and targetlookup is None: targetname = self._lookupName(lookup.name + " target") targetlookup = ast.LookupBlock(targetname) fealookup.targets = getattr(fealookup, "targets", []) fealookup.targets.append(targetlookup) self._gposLookup(lookup, targetlookup) self._gposContextLookup( lookup, prefix, suffix, ignore, fealookup, targetlookup ) else: self._gposLookup(lookup, fealookup) def main(args=None): """Convert MS VOLT to AFDKO feature files.""" import argparse from pathlib import Path from fontTools import configLogger parser = argparse.ArgumentParser( "fonttools voltLib.voltToFea", description=main.__doc__ ) parser.add_argument( "input", metavar="INPUT", type=Path, help="input font/VTP file to process" ) parser.add_argument( "featurefile", metavar="OUTPUT", type=Path, help="output feature file" ) parser.add_argument( "-t", "--table", action="append", choices=TABLES, dest="tables", help="List of tables to write, by default all tables are written", ) parser.add_argument( "-q", "--quiet", action="store_true", help="Suppress non-error messages" ) parser.add_argument( "--traceback", action="store_true", help="Don’t catch exceptions" ) options = parser.parse_args(args) configLogger(level=("ERROR" if options.quiet else "INFO")) file_or_path = options.input font = None try: font = TTFont(file_or_path) if "TSIV" in font: file_or_path = 
StringIO(font["TSIV"].data.decode("utf-8")) else: log.error('"TSIV" table is missing, font was not saved from VOLT?') return 1 except TTLibError: pass converter = VoltToFea(file_or_path, font) try: fea = converter.convert(options.tables) except NotImplementedError as e: if options.traceback: raise location = getattr(e.args[0], "location", None) message = f'"{e}" is not supported' if location: path, line, column = location log.error(f"{path}:{line}:{column}: {message}") else: log.error(message) return 1 with open(options.featurefile, "w") as feafile: feafile.write(fea) if __name__ == "__main__": import sys sys.exit(main()) PKaZZZb���/fonttools-4.51.0.data/data/share/man/man1/ttx.1.Dd May 18, 2004 .\" ttx is not specific to any OS, but contrary to what groff_mdoc(7) .\" seems to imply, entirely omitting the .Os macro causes 'BSD' to .\" be used, so I give a zero-width space as its argument. .Os \& .\" The "FontTools Manual" argument apparently has no effect in .\" groff 1.18.1. I think it is a bug in the -mdoc groff package. .Dt TTX 1 "FontTools Manual" .Sh NAME .Nm ttx .Nd tool for manipulating TrueType and OpenType fonts .Sh SYNOPSIS .Nm .Bk .Op Ar option ... .Ek .Bk .Ar file ... .Ek .Sh DESCRIPTION .Nm is a tool for manipulating TrueType and OpenType fonts. It can convert TrueType and OpenType fonts to and from an .Tn XML Ns -based format called .Tn TTX . .Tn TTX files have a .Ql .ttx extension. .Pp For each .Ar file argument it is given, .Nm detects whether it is a .Ql .ttf , .Ql .otf or .Ql .ttx file and acts accordingly: if it is a .Ql .ttf or .Ql .otf file, it generates a .Ql .ttx file; if it is a .Ql .ttx file, it generates a .Ql .ttf or .Ql .otf file. .Pp By default, every output file is created in the same directory as the corresponding input file and with the same name except for the extension, which is substituted appropriately. .Nm never overwrites existing files; if necessary, it appends a suffix to the output file name before the extension, as in .Pa Arial#1.ttf . .Ss "General options" .Bl -tag -width ".Fl t Ar table" .It Fl h Display usage information. .It Fl d Ar dir Write the output files to directory .Ar dir instead of writing every output file to the same directory as the corresponding input file. .It Fl o Ar file Write the output to .Ar file instead of writing it to the same directory as the corresponding input file. .It Fl v Be verbose. Write more messages to the standard output describing what is being done. .It Fl a Allow virtual glyphs ID's on compile or decompile. .El .Ss "Dump options" The following options control the process of dumping font files (TrueType or OpenType) to .Tn TTX files. .Bl -tag -width ".Fl t Ar table" .It Fl l List table information. Instead of dumping the font to a .Tn TTX file, display minimal information about each table. .It Fl t Ar table Dump table .Ar table . This option may be given multiple times to dump several tables at once. When not specified, all tables are dumped. .It Fl x Ar table Exclude table .Ar table from the list of tables to dump. This option may be given multiple times to exclude several tables from the dump. The .Fl t and .Fl x options are mutually exclusive. .It Fl s Split tables. Dump each table to a separate .Tn TTX file and write (under the name that would have been used for the output file if the .Fl s option had not been given) one small .Tn TTX file containing references to the individual table dump files. This file can be used as input to .Nm as long as the referenced files can be found in the same directory. 
.It Fl i
.\" XXX: I suppose OpenType programs (exist and) are also affected.
Don't disassemble TrueType instructions. When this option is
specified, all TrueType programs (glyph programs, the font program and
the pre-program) are written to the
.Tn TTX
file as hexadecimal data instead of assembly. This saves some time and
results in smaller
.Tn TTX
files.
.It Fl y Ar n
When decompiling a TrueType Collection (TTC) file, decompile font
number
.Ar n ,
starting from 0.
.El
.Ss "Compilation options"
The following options control the process of compiling
.Tn TTX
files into font files (TrueType or OpenType):
.Bl -tag -width ".Fl t Ar table"
.It Fl m Ar fontfile
Merge the input
.Tn TTX
file
.Ar file
with
.Ar fontfile .
No more than one
.Ar file
argument can be specified when this option is used.
.It Fl b
Don't recalculate glyph bounding boxes. Use the values in the
.Tn TTX
file as is.
.El
.Sh "THE TTX FILE FORMAT"
You can find some information about the
.Tn TTX
file format in
.Pa documentation.html .
In particular, you will find in that file the list of tables understood by
.Nm
and the relations between TrueType GlyphIDs and the glyph names used in
.Tn TTX
files.
.Sh EXAMPLES
In the following examples, all files are read from and written to the
current directory. Additionally, the name given for the output file
assumes in every case that it did not exist before
.Nm
was invoked.
.Pp
Dump the TrueType font contained in
.Pa FreeSans.ttf
to
.Pa FreeSans.ttx :
.Pp
.Dl ttx FreeSans.ttf
.Pp
Compile
.Pa MyFont.ttx
into a TrueType or OpenType font file:
.Pp
.Dl ttx MyFont.ttx
.Pp
List the tables in
.Pa FreeSans.ttf
along with some information:
.Pp
.Dl ttx -l FreeSans.ttf
.Pp
Dump the
.Sq cmap
table from
.Pa FreeSans.ttf
to
.Pa FreeSans.ttx :
.Pp
.Dl ttx -t cmap FreeSans.ttf
.Sh NOTES
On MS\-Windows and MacOS,
.Nm
is available as a graphical application to which files can be
dropped.
.Sh SEE ALSO
.Pa documentation.html
.Pp
.Xr fontforge 1 ,
.Xr ftinfo 1 ,
.Xr gfontview 1 ,
.Xr xmbdfed 1 ,
.Xr Font::TTF 3pm
.Sh AUTHORS
.Nm
was written by
.An -nosplit
.An "Just van Rossum" Aq just@letterror.com .
.Pp
This manual page was written by
.An "Florent Rougon" Aq f.rougon@free.fr
for the Debian GNU/Linux system based on the existing FontTools
documentation. It may be freely used, modified and distributed without
restrictions.
.\" For Emacs:
.\" Local Variables:
.\" fill-column: 72
.\" sentence-end: "[.?!][]\"')}]*\\($\\| $\\| \\| \\)[ \n]*"
.\" sentence-end-double-space: t
.\" End:

fonttools-4.51.0.dist-info/LICENSE

MIT License

Copyright (c) 2017 Just van Rossum

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.

fonttools-4.51.0.dist-info/METADATA

Metadata-Version: 2.1
Name: fonttools
Version: 4.51.0
Summary: Tools to manipulate font files
Home-page: http://github.com/fonttools/fonttools
Author: Just van Rossum
Author-email: just@letterror.com
Maintainer: Behdad Esfahbod
Maintainer-email: behdad@behdad.org
License: MIT
Platform: Any
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Console
Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: End Users/Desktop
Classifier: License :: OSI Approved :: MIT License
Classifier: Natural Language :: English
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Text Processing :: Fonts
Classifier: Topic :: Multimedia :: Graphics
Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion
Requires-Python: >=3.8
License-File: LICENSE
Provides-Extra: all
Requires-Dist: fs <3,>=2.2.0 ; extra == 'all'
Requires-Dist: lxml >=4.0 ; extra == 'all'
Requires-Dist: zopfli >=0.1.4 ; extra == 'all'
Requires-Dist: lz4 >=1.7.4.2 ; extra == 'all'
Requires-Dist: pycairo ; extra == 'all'
Requires-Dist: matplotlib ; extra == 'all'
Requires-Dist: sympy ; extra == 'all'
Requires-Dist: skia-pathops >=0.5.0 ; extra == 'all'
Requires-Dist: uharfbuzz >=0.23.0 ; extra == 'all'
Requires-Dist: brotlicffi >=0.8.0 ; (platform_python_implementation != "CPython") and extra == 'all'
Requires-Dist: scipy ; (platform_python_implementation != "PyPy") and extra == 'all'
Requires-Dist: brotli >=1.0.1 ; (platform_python_implementation == "CPython") and extra == 'all'
Requires-Dist: munkres ; (platform_python_implementation == "PyPy") and extra == 'all'
Requires-Dist: unicodedata2 >=15.1.0 ; (python_version <= "3.12") and extra == 'all'
Requires-Dist: xattr ; (sys_platform == "darwin") and extra == 'all'
Provides-Extra: graphite
Requires-Dist: lz4 >=1.7.4.2 ; extra == 'graphite'
Provides-Extra: interpolatable
Requires-Dist: pycairo ; extra == 'interpolatable'
Requires-Dist: scipy ; (platform_python_implementation != "PyPy") and extra == 'interpolatable'
Requires-Dist: munkres ; (platform_python_implementation == "PyPy") and extra == 'interpolatable'
Provides-Extra: lxml
Requires-Dist: lxml >=4.0 ; extra == 'lxml'
Provides-Extra: pathops
Requires-Dist: skia-pathops >=0.5.0 ; extra == 'pathops'
Provides-Extra: plot
Requires-Dist: matplotlib ; extra == 'plot'
Provides-Extra: repacker
Requires-Dist: uharfbuzz >=0.23.0 ; extra == 'repacker'
Provides-Extra: symfont
Requires-Dist: sympy ; extra == 'symfont'
Provides-Extra: type1
Requires-Dist: xattr ; (sys_platform == "darwin") and extra == 'type1'
Provides-Extra: ufo
Requires-Dist: fs <3,>=2.2.0 ; extra == 'ufo'
Provides-Extra: unicode
Requires-Dist: unicodedata2 >=15.1.0 ; (python_version <= "3.12") and extra == 'unicode'
Provides-Extra: woff
Requires-Dist: zopfli >=0.1.4 ; extra == 'woff'
Requires-Dist: brotlicffi >=0.8.0 ; (platform_python_implementation != "CPython") and extra == 'woff'
Requires-Dist: brotli >=1.0.1 ; (platform_python_implementation == "CPython") and extra == 'woff'
|CI Build Status| |Coverage Status| |PyPI| |Gitter Chat|

What is this?
~~~~~~~~~~~~~

| fontTools is a library for manipulating fonts, written in Python. The
  project includes the TTX tool, which can convert TrueType and OpenType
  fonts to and from an XML text format, also called TTX. It supports
  TrueType, OpenType, AFM and, to an extent, Type 1 and some Mac-specific
  formats. The project has an `MIT open-source license <LICENSE>`__.
| Among other things this means you can use it free of charge.

`User documentation <https://fonttools.readthedocs.io/en/latest/>`_ and
`developer documentation <https://fonttools.readthedocs.io/en/latest/developer.html>`_
are available at `Read the Docs <https://fonttools.readthedocs.io/>`_.

Installation
~~~~~~~~~~~~

FontTools requires `Python <http://www.python.org/download/>`__ 3.8 or later.
We try to follow the same schedule of minimum Python version support as NumPy
(see `NEP 29 <https://numpy.org/neps/nep-0029-deprecation_policy.html>`__).

The package is listed in the Python Package Index (PyPI), so you can install
it with `pip <https://pip.pypa.io>`__:

.. code:: sh

    pip install fonttools

If you would like to contribute to its development, you can clone the
repository from GitHub, install the package in 'editable' mode and modify the
source code in place. We recommend creating a virtual environment, using
`virtualenv <https://virtualenv.pypa.io>`__ or the Python 3
`venv <https://docs.python.org/3/library/venv.html>`__ module.

.. code:: sh

    # download the source code to 'fonttools' folder
    git clone https://github.com/fonttools/fonttools.git
    cd fonttools

    # create new virtual environment called e.g. 'fonttools-venv', or anything you like
    python -m virtualenv fonttools-venv

    # source the `activate` shell script to enter the environment (Unix-like); to exit, just type `deactivate`
    . fonttools-venv/bin/activate

    # to activate the virtual environment in Windows `cmd.exe`, do
    fonttools-venv\Scripts\activate.bat

    # install in 'editable' mode
    pip install -e .

Optional Requirements
---------------------

The ``fontTools`` package currently has no (required) external dependencies
besides the modules included in the Python Standard Library. However, a few
extra dependencies are required by some of its modules, which are needed to
unlock optional features. The ``fonttools`` PyPI distribution also supports
so-called "extras", i.e. a set of keywords that describe a group of additional
dependencies, which can be used when installing via pip, or when specifying a
requirement. For example:

.. code:: sh

    pip install fonttools[ufo,lxml,woff,unicode]

This command will install fonttools, as well as the optional dependencies that
are required to unlock the extra features named "ufo", etc.

- ``Lib/fontTools/misc/etree.py``

  The module exports an ElementTree-like API for reading/writing XML files,
  and allows using either the built-in ``xml.etree`` module or
  `lxml <https://lxml.de>`__ as the backend. The latter is preferred whenever
  present, as it is generally faster and more secure (a short sketch follows
  this list).

  *Extra:* ``lxml``
- ``Lib/fontTools/ufoLib``

  Package for reading and writing UFO source files; it requires:

  * `fs <https://pypi.org/pypi/fs>`__: (aka ``pyfilesystem2``) filesystem
    abstraction layer.

  * `enum34 <https://pypi.org/pypi/enum34>`__: backport for the built-in
    ``enum`` module (only required on Python < 3.4).

  *Extra:* ``ufo``

- ``Lib/fontTools/ttLib/woff2.py``

  Module to compress/decompress WOFF 2.0 web fonts; it requires:

  * `brotli <https://pypi.python.org/pypi/Brotli>`__: Python bindings of the
    Brotli compression library.

  *Extra:* ``woff``

- ``Lib/fontTools/ttLib/sfnt.py``

  To better compress WOFF 1.0 web fonts, the following module can be used
  instead of the built-in ``zlib`` library:

  * `zopfli <https://pypi.python.org/pypi/zopfli>`__: Python bindings of the
    Zopfli compression library.

  *Extra:* ``woff``

- ``Lib/fontTools/unicode.py``

  To display the Unicode character names when dumping the ``cmap`` table with
  ``ttx`` we use the ``unicodedata`` module in the Standard Library. The
  version included in there varies between different Python versions. To use
  the latest available data, you can install:

  * `unicodedata2 <https://pypi.python.org/pypi/unicodedata2>`__:
    ``unicodedata`` backport for Python 3.x updated to the latest Unicode
    version 15.1.

  *Extra:* ``unicode``

- ``Lib/fontTools/varLib/interpolatable.py``

  Module for finding wrong contour/component order between different masters.
  It requires one of the following packages in order to solve the so-called
  "minimum weight perfect matching problem in bipartite graphs", or the
  Assignment problem:

  * `scipy <https://pypi.python.org/pypi/scipy>`__: the Scientific Library
    for Python, which internally uses
    `NumPy <https://pypi.python.org/pypi/numpy>`__ arrays and hence is very
    fast;

  * `munkres <https://pypi.python.org/pypi/munkres>`__: a pure-Python module
    that implements the Hungarian or Kuhn-Munkres algorithm.

  To plot the results to a PDF or HTML format, you also need to install:

  * `pycairo <https://pypi.org/project/pycairo/>`__: Python bindings for the
    Cairo graphics library. Note that wheels are currently only available for
    Windows, for other platforms see pycairo's `installation instructions
    <https://pycairo.readthedocs.io/en/latest/getting_started.html>`__.

  *Extra:* ``interpolatable``

- ``Lib/fontTools/varLib/plot.py``

  Module for visualizing DesignSpaceDocument and resulting VariationModel.

  * `matplotlib <https://pypi.org/pypi/matplotlib>`__: 2D plotting library.

  *Extra:* ``plot``

- ``Lib/fontTools/misc/symfont.py``

  Advanced module for symbolic font statistics analysis; it requires:

  * `sympy <https://pypi.python.org/pypi/sympy>`__: the Python library for
    symbolic mathematics.

  *Extra:* ``symfont``

- ``Lib/fontTools/t1Lib.py``

  To get the file creator and type of Macintosh PostScript Type 1 fonts on
  Python 3 you need to install the following module, as the old ``MacOS``
  module is no longer included in Mac Python:

  * `xattr <https://pypi.python.org/pypi/xattr>`__: Python wrapper for
    extended filesystem attributes (macOS platform only).

  *Extra:* ``type1``

- ``Lib/fontTools/ttLib/removeOverlaps.py``

  Simplify TrueType glyphs by merging overlapping contours and components
  (see the sketch after this list).

  * `skia-pathops <https://pypi.python.org/pypi/skia-pathops>`__: Python
    bindings for the Skia library's PathOps module, performing boolean
    operations on paths (union, intersection, etc.).

  *Extra:* ``pathops``

- ``Lib/fontTools/pens/cocoaPen.py`` and ``Lib/fontTools/pens/quartzPen.py``

  Pens for drawing glyphs with Cocoa ``NSBezierPath`` or ``CGPath`` require:

  * `PyObjC <https://pypi.python.org/pypi/pyobjc>`__: the bridge between
    Python and the Objective-C runtime (macOS platform only).

- ``Lib/fontTools/pens/qtPen.py``

  Pen for drawing glyphs with Qt's ``QPainterPath``, requires:

  * `PyQt5 <https://pypi.python.org/pypi/PyQt5>`__: Python bindings for the
    Qt cross platform UI and application toolkit.

- ``Lib/fontTools/pens/reportLabPen.py``

  Pen for drawing glyphs as PNG images, requires:

  * `reportlab <https://pypi.python.org/pypi/reportlab>`__: Python toolkit
    for generating PDFs and graphics.

- ``Lib/fontTools/pens/freetypePen.py``

  Pen for drawing glyphs with FreeType as raster images, requires:

  * `freetype-py <https://pypi.python.org/pypi/freetype-py>`__: Python
    binding for the FreeType library.

- ``Lib/fontTools/ttLib/tables/otBase.py``

  Use the Harfbuzz library to serialize GPOS/GSUB using the ``hb_repack``
  method, requires:

  * `uharfbuzz <https://pypi.python.org/pypi/uharfbuzz>`__: Streamlined
    Cython bindings for the harfbuzz shaping engine.

  *Extra:* ``repacker``
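Two short sketches may help tie the extras above to actual code. First, a
minimal sketch of the ``fontTools.misc.etree`` wrapper described above (the
element and attribute names are illustrative); the same ElementTree-style
calls work whether the backend is the built-in ``xml.etree`` or ``lxml``:

.. code:: python

    from fontTools.misc import etree

    # build a tiny XML tree and serialize it; lxml is picked up
    # automatically as the backend when installed
    root = etree.Element("root")
    child = etree.SubElement(root, "child")
    child.set("name", "example")
    print(etree.tostring(root, encoding="unicode"))

Second, a minimal sketch of ``fontTools.ttLib.removeOverlaps`` (requires the
"pathops" extra; the file names are hypothetical):

.. code:: python

    from fontTools.ttLib import TTFont
    from fontTools.ttLib.removeOverlaps import removeOverlaps

    font = TTFont("MyFont.ttf")
    removeOverlaps(font)  # merge overlapping contours/components via skia-pathops
    font.save("MyFont-no-overlaps.ttf")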
How to make a new release
~~~~~~~~~~~~~~~~~~~~~~~~~

1) Update ``NEWS.rst`` with all the changes since the last release. Write a
   changelog entry for each PR, with one or two short sentences summarizing
   it, as well as links to the PR and relevant issues addressed by the PR.
   Do not add a new title; the next command will do it for you.
2) Use semantic versioning to decide whether the new release will be a
   'major', 'minor' or 'patch' release. It's usually one of the latter two,
   depending on whether new backward-compatible APIs were added, or simply
   some bugs were fixed.
3) Run the ``python setup.py release`` command from the tip of the ``main``
   branch. By default this bumps the third or 'patch' digit only, unless you
   pass ``--major`` or ``--minor`` to bump respectively the first or second
   digit. This bumps the package version string, extracts the changes since
   the latest version from ``NEWS.rst``, and uses that text to create an
   annotated git tag (or a signed git tag if you pass the ``--sign`` option
   and your git and GitHub account are configured for `signing commits
   <https://docs.github.com/en/github/authenticating-to-github/managing-commit-signature-verification/signing-commits>`__
   using a GPG key). It also commits an additional version bump which opens
   the main branch for the subsequent development cycle.
4) Push both the tag and commit to the upstream repository, by running the
   command ``git push --follow-tags``. Note: it may push other local tags as
   well, so be careful.
5) Let the CI build the wheel and source distribution packages and verify
   both get uploaded to the Python Package Index (PyPI).
6) [Optional] Go to the fonttools `GitHub Releases
   <https://github.com/fonttools/fonttools/releases>`__ page and create a new
   release, copy-pasting the content of the git tag message. This way, the
   release notes are nicely formatted as markdown, and users watching the
   repo will get an email notification. One day we shall automate that too.
Acknowledgements ~~~~~~~~~~~~~~~~ In alphabetical order: aschmitz, Olivier Berten, Samyak Bhuta, Erik van Blokland, Petr van Blokland, Jelle Bosma, Sascha Brawer, Tom Byrer, Antonio Cavedoni, Frédéric Coiffier, Vincent Connare, David Corbett, Simon Cozens, Dave Crossland, Simon Daniels, Peter Dekkers, Behdad Esfahbod, Behnam Esfahbod, Hannes Famira, Sam Fishman, Matt Fontaine, Takaaki Fuji, Rob Hagemans, Yannis Haralambous, Greg Hitchcock, Jeremie Hornus, Khaled Hosny, John Hudson, Denis Moyogo Jacquerye, Jack Jansen, Tom Kacvinsky, Jens Kutilek, Antoine Leca, Werner Lemberg, Tal Leming, Peter Lofting, Cosimo Lupo, Olli Meier, Masaya Nakamura, Dave Opstad, Laurence Penney, Roozbeh Pournader, Garret Rieger, Read Roberts, Colin Rofls, Guido van Rossum, Just van Rossum, Andreas Seidel, Georg Seifert, Chris Simpkins, Miguel Sousa, Adam Twardoch, Adrien Tétar, Vitaly Volkov, Paul Wise. Copyrights ~~~~~~~~~~ | Copyright (c) 1999-2004 Just van Rossum, LettError (just@letterror.com) | See `LICENSE <LICENSE>`__ for the full license. Copyright (c) 2000 BeOpen.com. All Rights Reserved. Copyright (c) 1995-2001 Corporation for National Research Initiatives. All Rights Reserved. Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All Rights Reserved. Have fun! .. |CI Build Status| image:: https://github.com/fonttools/fonttools/workflows/Test/badge.svg :target: https://github.com/fonttools/fonttools/actions?query=workflow%3ATest .. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/main/graph/badge.svg :target: https://codecov.io/gh/fonttools/fonttools .. |PyPI| image:: https://img.shields.io/pypi/v/fonttools.svg :target: https://pypi.org/project/FontTools .. |Gitter Chat| image:: https://badges.gitter.im/fonttools-dev/Lobby.svg :alt: Join the chat at https://gitter.im/fonttools-dev/Lobby :target: https://gitter.im/fonttools-dev/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge Changelog ~~~~~~~~~ 4.51.0 (released 2024-04-05) ---------------------------- - [ttLib] Optimization on loading aux fields (#3464). - [ttFont] Add reorderGlyphs (#3468). 4.50.0 (released 2024-03-15) ---------------------------- - [pens] Added decomposing filter pens that draw components as regular contours (#3460). - [instancer] Drop explicit no-op axes from TupleVariations (#3457). - [cu2qu/ufo] Return set of modified glyph names from fonts_to_quadratic (#3456). 4.49.0 (released 2024-02-15) ---------------------------- - [otlLib] Add API for building ``MATH`` table (#3446) 4.48.1 (released 2024-02-06) ---------------------------- - Fixed uploading wheels to PyPI, no code changes since v4.48.0. 4.48.0 (released 2024-02-06) ---------------------------- - [varLib] Do not log when there are no OTL tables to be merged. - [setup.py] Do not restrict lxml<5 any more, tests pass just fine with lxml>=5. - [feaLib] Remove glyph and class names length restrictions in FEA (#3424). - [roundingPens] Added ``transformRoundFunc`` parameter to the rounding pens to allow for custom rounding of the components' transforms (#3426). - [feaLib] Keep declaration order of ligature components within a ligature set, instead of sorting by glyph name (#3429). - [feaLib] Fixed ordering of alternates in ``aalt`` lookups, following the declaration order of feature references within the ``aalt`` feature block (#3430). - [varLib.instancer] Fixed a bug in the instancer's IUP optimization (#3432). - [sbix] Support sbix glyphs with new graphicType "flip" (#3433). 
- [svgPathPen] Added ``--glyphs`` option to dump the SVG paths for the named glyphs in the font (0572f78).
- [designspaceLib] Added "description" attribute to ``<mappings>`` and ``<mapping>`` elements, and allow multiple ``<mappings>`` elements to group ``<mapping>`` elements that are logically related (#3435, #3437).
- [otlLib] Correctly choose the most compact GSUB contextual lookup format (#3439).

4.47.2 (released 2024-01-11)
----------------------------

Minor release to fix uploading wheels to PyPI.

4.47.1 (released 2024-01-11)
----------------------------

- [merge] Improve help message and add standard command line options (#3408).
- [otlLib] Pass ``ttFont`` to ``name.addName`` in ``buildStatTable`` (#3406).
- [featureVars] Re-use ``FeatureVariationRecord``'s when possible (#3413).

4.47.0 (released 2023-12-18)
----------------------------

- [varLib.models] New API for VariationModel: ``getMasterScalars`` and ``interpolateFromValuesAndScalars``.
- [varLib.interpolatable] Various bugfixes and rendering improvements. In particular, add a Summary page in the front, and an Index and Table-of-Contents in the back. Change the page size to Letter.
- [Docs/designspaceLib] Defined a new ``public.fontInfo`` lib key, not used anywhere yet (#3358).

4.46.0 (released 2023-12-02)
----------------------------

- [featureVars] Allow registering the same set of substitution rules to multiple features. The ``addFeatureVariations`` function can now take a list of featureTags; similarly, the lib key 'com.github.fonttools.varLib.featureVarsFeatureTag' can now take a comma-separated string of feature tags (e.g. "salt,ss01") instead of a single tag (#3360) (usage sketch below).
- [featureVars] Don't overwrite GSUB FeatureVariations, but append new records to it for features which are not already there. But raise ``VarLibError`` if the feature tag already has feature variations associated with it (#3363).
- [varLib] Added ``addGSUBFeatureVariations`` function to add GSUB Feature Variations to an existing variable font from rules defined in a DesignSpace document (#3362).
- [varLib.interpolatable] Various bugfixes and rendering improvements. In particular, a new test for "underweight" glyphs. The new test reports quite a few false positives though. Please send feedback.

4.45.1 (released 2023-11-23)
----------------------------

- [varLib.interpolatable] Various bugfixes and improvements, better reporting, reduced false positives.
- [ttGlyphSet] Added option to not recalculate glyf bounds (#3348).

4.45.0 (released 2023-11-20)
----------------------------

- [varLib.interpolatable] Vastly improved algorithms. Also available now are ``--pdf`` and ``--html`` options to generate a PDF or HTML report of the interpolation issues. The PDF/HTML report showcases the problematic masters, the interpolated broken glyph, as well as the proposed fixed version.

4.44.3 (released 2023-11-15)
----------------------------

- [subset] Only prune codepage ranges for OS/2.version >= 1, ignore otherwise (#3334).
- [instancer] Ensure hhea vertical metrics stay in sync with OS/2 ones after instancing MVAR table containing 'hasc', 'hdsc' or 'hlgp' tags (#3297).

4.44.2 (released 2023-11-14)
----------------------------

- [glyf] Have ``Glyph.recalcBounds`` skip empty components (base glyph with no contours) when computing the bounding box of composite glyphs. This simply restores the existing behavior before some changes were introduced in fonttools 4.44.0 (#3333).
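For illustration, a minimal sketch of the 4.46.0 ``addFeatureVariations``
change above; the font path and glyph names are hypothetical, and passing a
list of feature tags follows the changelog entry's description:

.. code:: python

    from fontTools.ttLib import TTFont
    from fontTools.varLib.featureVars import addFeatureVariations

    font = TTFont("MyVF.ttf")  # hypothetical variable font
    # substitute "dollar" with "dollar.alt" in the upper half of the wght axis
    conditions = [([{"wght": (0.5, 1.0)}], {"dollar": "dollar.alt"})]
    # per 4.46.0, the same rules can be registered under multiple features
    addFeatureVariations(font, conditions, featureTag=["salt", "ss01"])
    font.save("MyVF-varfeatures.ttf")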
4.44.1 (released 2023-11-14)
----------------------------

- [feaLib] Ensure variable mark anchors are deep-copied while building since they get modified in-place and later reused (#3330).
- [OS/2|subset] Added ``recalcCodePageRanges`` method to the OS/2 table class; added ``--prune-codepage-ranges`` to the `fonttools subset` command (#3328, #2607) (a subsetting sketch follows this stretch of releases).

4.44.0 (released 2023-11-03)
----------------------------

- [instancer] Recalc OS/2 AvgCharWidth after instancing if default changes (#3317).
- [otlLib] Make ClassDefBuilder class order match varLib.merger's, i.e. large classes first, then glyph lexicographic order (#3321, #3324).
- [instancer] Allow not specifying any of min:default:max values and let them be filled up with fvar's values (#3322, #3323).
- [instancer] When running --update-name-table ignore axes that have no STAT axis values (#3318, #3319).
- [Debg] When dumping to ttx, write the embedded JSON as a multi-line string with indentation (92cbfee0d).
- [varStore] Handle > 65535 items per encoding by splitting the VarData subtable (#3310).
- [subset] Handle null-offsets in MarkLigPos subtables.
- [subset] Keep East Asian spacing features vhal, halt, chws, vchw by default (#3305).
- [instancer.solver] Fixed case where axisDef < lower and upper < axisMax (#3304).
- [glyf] Speed up compilation, mostly around ``recalcBounds`` (#3301).
- [varLib.interpolatable] Speed it up when working on variable fonts, plus various micro-optimizations (#3300).
- Require unicodedata2 >= 15.1.0 when installed with the 'unicode' extra; it contains UCD 15.1.

4.43.1 (released 2023-10-06)
----------------------------

- [EBDT] Fixed TypeError exception in `_reverseBytes` method triggered when dumping some bitmap fonts with `ttx -z bitwise` option (#3162).
- [v/hhea] Fixed UnboundLocalError exception in ``recalc`` method when no vmtx or hmtx tables are present (#3290).
- [bezierTools] Fixed incorrectly typed cython local variable leading to TypeError when calling ``calcQuadraticArcLength`` (#3288).
- [feaLib/otlLib] Better error message when building Coverage table with missing glyph (#3286).

4.43.0 (released 2023-09-29)
----------------------------

- [subset] Set up lxml ``XMLParser(resolve_entities=False)`` when parsing OT-SVG documents to prevent XML External Entity (XXE) attacks (9f61271dc): https://codeql.github.com/codeql-query-help/python/py-xxe/
- [varLib.iup] Added workaround for a Cython bug in ``iup_delta_optimize`` that was leading to IUP tolerance being incorrectly initialised, resulting in sub-optimal deltas (60126435d, cython/cython#5732).
- [varLib] Added new command-line entry point ``fonttools varLib.avar`` to add an ``avar`` table to an existing VF from axes mappings in a .designspace file (0a3360e52).
- [instancer] Fixed bug whereby no-longer-used variation regions were not correctly pruned after VarData optimization (#3268).
- Added support for Python 3.12 (#3283).

4.42.1 (released 2023-08-20)
----------------------------

- [t1Lib] Fixed several Type 1 issues (#3238, #3240).
- [otBase/packer] Allow sharing tables reached by different offset sizes (#3241, #3236).
- [varLib/merger] Fix Cursive attachment merging error when all anchors are NULL (#3248, #3247).
- [ttLib] Fixed warning when calling ``addMultilingualName`` and ``ttFont`` parameter was not passed on to ``findMultilingualName`` (#3253).
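Several of the entries above touch the ``fontTools.subset`` module; as a
general reference, a minimal sketch of driving it from Python rather than the
CLI (the paths and the unicode range are illustrative):

.. code:: python

    from fontTools.ttLib import TTFont
    from fontTools.subset import Options, Subsetter

    font = TTFont("MyFont.ttf")
    subsetter = Subsetter(options=Options())
    # keep only the Basic Latin range; CLI flags like
    # --prune-codepage-ranges map onto the same Options object
    subsetter.populate(unicodes=range(0x0020, 0x007F))
    subsetter.subset(font)
    font.save("MyFont-subset.ttf")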
4.42.0 (released 2023-08-02)
----------------------------

- [varLib] Use sentinel value 0xFFFF to mark a glyph advance in hmtx/vmtx as non participating, allowing sparse masters to contain glyphs for variation purposes other than {H,V}VAR (#3235).
- [varLib/cff] Treat empty glyphs in non-default masters as missing, thus not participating in CFF2 delta computation, similarly to how varLib already treats them for gvar (#3234).
- Added varLib.avarPlanner script to deduce 'correct' avar v1 axis mappings based on glyph average weights (#3223).

4.41.1 (released 2023-07-21)
----------------------------

- [subset] Fixed perf regression in v4.41.0 by making ``NameRecordVisitor`` only visit tables that do contain nameID references (#3213, #3214).
- [varLib.instancer] Support instancing fonts containing null ConditionSet offsets in FeatureVariationRecords (#3211, #3212).
- [statisticsPen] Report font glyph-average weight/width and font-wide slant.
- [fontBuilder] Fixed head.created date incorrectly set to 0 instead of the current timestamp, regression introduced in v4.40.0 (#3210).
- [varLib.merger] Support sparse ``CursivePos`` masters (#3209).

4.41.0 (released 2023-07-12)
----------------------------

- [fontBuilder] Fixed bug in setupOS2 with default panose attribute incorrectly being set to a dict instead of a Panose object (#3201).
- [name] Added ``removeUnusedNameRecords`` method for name records in the user range (#3185).
- [varLib.instancer] Fixed issue with L4 instancing (moving default) (#3179).
- [cffLib] Use latin1 so we can roundtrip non-ASCII in {Full,Font,Family}Name (#3202).
- [designspaceLib] Mark <source name="..."> as optional in docs (as it is in the code).
- [glyf-1] Fixed drawPoints() bug whereby last cubic segment becomes quadratic (#3189, #3190).
- [fontBuilder] Propagate the 'hidden' flag to the fvar Axis instance (#3184).
- [fontBuilder] Update setupAvar() to also support avar 2, fixing ``_add_avar()`` call site (#3183).
- Added new ``voltLib.voltToFea`` submodule (originally Tiro Typeworks' "Volto") for converting VOLT OpenType Layout sources to FEA format (#3164).

4.40.0 (released 2023-06-12)
----------------------------

- Published native binary wheels to PyPI for all the Python minor versions, platforms and architectures currently supported that would benefit from this. They will include precompiled Cython-accelerated modules (e.g. cu2qu) without requiring them to be compiled from source. The pure-python wheel and source distribution will continue to be published as always (pip will automatically choose them when no binary wheel is available for the given platform, e.g. pypy). Use ``pip install --no-binary=fonttools fonttools`` to explicitly request pip to install from the pure-python source.
- [designspaceLib|varLib] Add initial support for specifying axis mappings and build ``avar2`` table from those (#3123).
- [feaLib] Support variable ligature caret position (#3130).
- [varLib|glyf] Added option to --drop-implied-oncurves; test for impliable oncurve points either before or after rounding (#3146, #3147, #3155, #3156).
- [TTGlyphPointPen] Don't error with empty contours, simply ignore them (#3145).
- [sfnt] Fixed str vs bytes remnant of py3 transition in code dealing with de/compiling WOFF metadata (#3129).
- [instancer-solver] Fixed bug when moving default instance with sparse masters (#3139, #3140).
- [feaLib] Simplify variable scalars that don’t vary (#3132).
- [pens] Added filter pen that explicitly emits closing line when lastPt != movePt (#3100).
- [varStore] Improve optimize algorithm and better document the algorithm (#3124, #3127). Added ``quantization`` option (#3126). - Added CI workflow config file for building native binary wheels (#3121). - [fontBuilder] Added glyphDataFormat=0 option; raise error when glyphs contain cubic outlines but glyphDataFormat was not explicitly set to 1 (#3113, #3119). - [subset] Prune emptied GDEF.MarkGlyphSetsDef and remap indices; ensure GDEF is subsetted before GSUB and GPOS (#3114, #3118). - [xmlReader] Fixed issue whereby DSIG table data was incorrectly parsed (#3115, #2614). - [varLib/merger] Fixed merging of SinglePos with pos=0 (#3111, #3112). - [feaLib] Demote "Feature has not been defined" error to a warning when building aalt and referenced feature is empty (#3110). - [feaLib] Dedupe multiple substitutions with classes (#3105). 4.39.4 (released 2023-05-10) ---------------------------- - [varLib.interpolatable] Allow for sparse masters (#3075) - [merge] Handle differing default/nominalWidthX in CFF (#3070) - [ttLib] Add missing main.py file to ttLib package (#3088) - [ttx] Fix missing composite instructions in XML (#3092) - [ttx] Fix split tables option to work on filenames containing '%' (#3096) - [featureVars] Process lookups for features other than rvrn last (#3099) - [feaLib] support multiple substitution with classes (#3103) 4.39.3 (released 2023-03-28) ---------------------------- - [sbix] Fixed TypeError when compiling empty glyphs whose imageData is None, regression was introduced in v4.39 (#3059). - [ttFont] Fixed AttributeError on python <= 3.10 when opening a TTFont from a tempfile SpooledTemporaryFile, seekable method only added on python 3.11 (#3052). 4.39.2 (released 2023-03-16) ---------------------------- - [varLib] Fixed regression introduced in 4.39.1 whereby an incomplete 'STAT' table would be built even though a DesignSpace v5 did contain 'STAT' definitions (#3045, #3046). 4.39.1 (released 2023-03-16) ---------------------------- - [avar2] Added experimental support for reading/writing avar version 2 as specified in this draft proposal: https://github.com/harfbuzz/boring-expansion-spec/blob/main/avar2.md - [glifLib] Wrap underlying XML library exceptions with GlifLibError when parsing GLIFs, and also print the name and path of the glyph that fails to be parsed (#3042). - [feaLib] Consult avar for normalizing user-space values in ConditionSets and in VariableScalars (#3042, #3043). - [ttProgram] Handle string input to Program.fromAssembly() (#3038). - [otlLib] Added a config option to emit GPOS 7 lookups, currently disabled by default because of a macOS bug (#3034). - [COLRv1] Added method to automatically compute ClipBoxes (#3027). - [ttFont] Fixed getGlyphID to raise KeyError on missing glyphs instead of returning None. The regression was introduced in v4.27.0 (#3032). - [sbix] Fixed UnboundLocalError: cannot access local variable 'rawdata' (#3031). - [varLib] When building VF, do not overwrite a pre-existing ``STAT`` table that was built with feaLib from FEA feature file. Also, added support for building multiple VFs defined in Designspace v5 from ``fonttools varLib`` script (#3024). - [mtiLib] Only add ``Debg`` table with lookup names when ``FONTTOOLS_LOOKUP_DEBUGGING`` env variable is set (#3023). 4.39.0 (released 2023-03-06) ---------------------------- - [mtiLib] Optionally add `Debg` debug info for MTI feature builds (#3018). 
- [ttx] Support reading input file from standard input using special `-` character, similar to existing `-o -` option to write output to standard output (#3020).
- [cython] Prevent ``cython.compiled`` from raising AttributeError if cython is not installed properly (#3017).
- [OS/2] Guard against ZeroDivisionError when calculating xAvgCharWidth in the unlikely scenario no glyph has non-zero advance (#3015).
- [subset] Recompute xAvgCharWidth independently of --no-prune-unicode-ranges, previously the two options were involuntarily bundled together (#3012).
- [fontBuilder] Add ``debug`` parameter to addOpenTypeFeatures method to add source debugging information to the font in the ``Debg`` private table (#3008).
- [name] Make NameRecord `__lt__` comparison not fail on Unicode encoding errors (#3006).
- [featureVars] Fixed bug in ``overlayBox`` (#3003, #3005).
- [glyf] Added experimental support for cubic bezier curves in TrueType glyf table, as outlined in glyf v1 proposal (#2988): https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1-cubicOutlines.md
- Added new qu2cu module and related qu2cuPen, the reverse of cu2qu for converting TrueType quadratic splines to cubic bezier curves (#2993).
- [glyf] Added experimental support for reading and writing Variable Composites/Components as defined in glyf v1 spec proposal (#2958): https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1-varComposites.md.
- [pens] Added `addVarComponent` method to pen protocols' base classes, which pens can implement to handle varcomponents (by default they get decomposed) (#2958).
- [misc.transform] Added DecomposedTransform class which implements an affine transformation with separate translate, rotation, scale, skew, and transformation-center components (#2598).
- [sbix] Ensure Glyph.referenceGlyphName is set; fixes error after dumping and re-compiling sbix table with 'dupe' glyphs (#2984).
- [feaLib] Be cleverer when merging chained single substitutions into same lookup when they are specified using the inline notation (#2150, #2974).
- [instancer] Clamp user-inputted axis ranges to those of fvar (#2959).
- [otBase/subset] Define ``__getstate__`` for BaseTable so that a copied/pickled 'lazy' object gets its own OTTableReader to read from; incidentally fixes a bug while subsetting COLRv1 table containing ClipBoxes on python 3.11 (#2965, #2968).
- [sbix] Handle glyphs with "dupe" graphic type on compile correctly (#2963).
- [glyf] ``endPointsOfContours`` field should be unsigned! Kudos to behdad for spotting one of the oldest bugs in FT. Probably nobody has ever dared to make glyphs with more than 32767 points... (#2957).
- [feaLib] Fixed handling of ``ignore`` statements with unmarked glyphs to match makeotf behavior, which assumes the first glyph is marked (#2950).
- Reformatted code with ``black`` and enforced the new code style via CI check (#2925).
- [feaLib] Sort name table entries following OT spec prescribed order in the builder (#2927).
- [cu2quPen] Add Cu2QuMultiPen that converts multiple outlines at a time in an interpolation-compatible way; its methods take a list of tuples arguments that would normally be passed to individual segment pens, and at the end it dispatches the converted outlines to each pen (#2912).
- [reverseContourPen/ttGlyphPen] Add outputImpliedClosingLine option (#2913, #2914, #2921, #2922, #2995).
- [gvar] Avoid expanding all glyphs unnecessarily upon compile (#2918).
- [scaleUpem] Fixed bug whereby CFF2 vsindex was scaled; it should not be (#2893, #2894).
- [designspaceLib] Add DS.getAxisByTag and refactor getAxis (#2891).
- [unicodedata] Map Zmth<->math in ot_tag_{to,from}_script (#1737, #2889).
- [woff2] Support encoding/decoding OVERLAP_SIMPLE glyf flags (#2576, #2884).
- [instancer] Update OS/2 class and post.italicAngle when default moved (L4).
- Dropped support for Python 3.7 which reached EOL, fontTools requires 3.8+.
- [instancer] Fixed instantiateFeatureVariations logic when a rule range becomes default-applicable (#2737, #2880).
- [ttLib] Add main to ttFont and ttCollection that just decompile and re-compile the input font (#2869).
- [featureVars] Insert 'rvrn' lookup at the beginning of LookupList, to work around a bug in Apple's implementation of the 'rvrn' feature: the spec says it should be processed early, whereas on macOS 10.15 it follows lookup order (#2140, #2867).
- [instancer/mutator] Remove 'DSIG' table if present.
- [svgPathPen] Don't close path in endPath(), assume open unless closePath() (#2089, #2865).

4.38.0 (released 2022-10-21)
----------------------------

- [varLib.instancer] Added support for L4 instancing, i.e. moving the default value of an axis while keeping it variable. Thanks Behdad! (#2728, #2861). It's now also possible to restrict an axis's min/max values beyond the current default value; e.g. if a font's wght axis has min=100, def=400, max=900 and you want a partial VF that only varies between 500 and 700, you can now do that. You can either specify two min/max values (wght=500:700), and the new default will be set to either the minimum or maximum, depending on which one is closer to the current default (e.g. 500 in this case), or you can specify three values (e.g. wght=500:600:700) to set the new default value explicitly (a usage sketch follows the 4.37.3 notes below).
- [otlLib/featureVars] Set a few Count values so one doesn't need to compile the font to update them (#2860).
- [varLib.models] Make extrapolation work for 2-master models as well where one master is at the default location (#2843, #2846). Add optional extrapolate=False to normalizeLocation() (#2847, #2849).
- [varLib.cff] Fixed sub-optimal packing of CFF2 deltas by no longer rounding them to integer (#2838).
- [scaleUpem] Calculate numShorts in VarData after scale; handle CFF hintmasks (#2840).

4.37.4 (released 2022-09-30)
----------------------------

- [subset] Keep nameIDs used by CPAL palette entry labels (#2837).
- [varLib] Avoid negative hmtx values when creating font from variable CFF2 font (#2827).
- [instancer] Don't prune stat.ElidedFallbackNameID (#2828).
- [unicodedata] Update Scripts/Blocks to Unicode 15.0 (#2833).

4.37.3 (released 2022-09-20)
----------------------------

- Fix arguments in calls to (glyf) glyph.draw() and drawPoints(), whereby offset wasn't correctly passed down; this fix also exposed a second bug, where lsb and tsb were not set (#2824, #2825, adobe-type-tools/afdko#1560).
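As a minimal sketch of the L4 partial instancing described in 4.38.0 above
(paths are illustrative): restrict wght to the 500-700 range while keeping it
variable; the CLI equivalent would use ``wght=500:700``, or the three-value
form ``wght=500:600:700`` to also move the default:

.. code:: python

    from fontTools.ttLib import TTFont
    from fontTools.varLib import instancer

    varfont = TTFont("MyVF.ttf")
    partial = instancer.instantiateVariableFont(varfont, {"wght": (500, 700)})
    partial.save("MyVF-wght500-700.ttf")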
4.37.2 (released 2022-09-15)
----------------------------

- [subset] Keep CPAL table and don't attempt to prune unused color indices if OT-SVG table is present even if COLR table was subsetted away; OT-SVG may be referencing the CPAL table; for now we assume that's the case (#2814, #2815).
- [varLib.instancer] Downgrade GPOS/GSUB version if there are no more FeatureVariations after instancing (#2812).
- [subset] Added ``--no-lazy`` to optionally load fonts eagerly (mostly to ease debugging of table lazy loading, no practical effects) (#2807).
- [varLib] Avoid building empty COLR.DeltaSetIndexMap with only identity mappings (#2803).
- [feaLib] Allow multiple value record types (by promoting to the most general format) within the same PairPos subtable; e.g. this allows variable and non-variable kerning rules to share the same subtable. This also fixes a bug whereby some kerning pairs would become unreachable while shaping because of premature subtable splitting (#2772, #2776).
- [feaLib] Speed up ``VarScalar`` by caching models for recurring master locations (#2798).
- [feaLib] Optionally cythonize ``feaLib.lexer``, speeds up parsing FEA a bit (#2799).
- [designspaceLib] Avoid crash when handling unbounded rule conditions (#2797).
- [post] Don't crash if ``post`` legacy format 1 is malformed/improperly used (#2786).
- [gvar] Don't be "lazy" (load all glyph variations up front) when TTFont.lazy=False (#2771).
- [TTFont] Added ``normalizeLocation`` method to normalize a location dict from the font's defined axes space (also known as "user space") into the normalized (-1..+1) space. It applies ``avar`` mapping if the font contains an ``avar`` table (#2789).
- [TTVarGlyphSet] Support drawing glyph instances from CFF2 variable glyph set (#2784).
- [fontBuilder] Do not error when building cmap if there are zero code points (#2785).
- [varLib.plot] Added ability to plot a variation model and a set of accompanying master values corresponding to the model's master locations into a pyplot figure (#2767).
- [Snippets] Added ``statShape.py`` script to draw the statistical shape of a glyph as an ellipse (requires pycairo) (baecd88).
- [TTVarGlyphSet] Implement drawPoints natively, avoiding going through SegmentToPointPen (#2778).
- [TTVarGlyphSet] Fixed bug whereby drawing a composite glyph multiple times, its components would shift; an extra copy was needed (#2774).

4.37.1 (released 2022-08-24)
----------------------------

- [subset] Fixed regression introduced with v4.37.0 while subsetting the VarStore of ``HVAR`` and ``VVAR`` tables, whereby an ``AttributeError: subset_varidxes`` was thrown because an apparently unused import statement (with the side-effect of dynamically binding that ``subset_varidxes`` method to the VarStore class) had been accidentally deleted in an unrelated PR (#2679, #2773).
- [pens] Added ``cairoPen`` (#2678).
- [gvar] Read ``gvar`` more lazily by not parsing all of the ``glyf`` table (#2771).
- [ttGlyphSet] Make ``drawPoints(pointPen)`` method work for CFF fonts as well via adapter pen (#2770).

4.37.0 (released 2022-08-23)
----------------------------

- [varLib.models] Reverted PR #2717 which added support for "narrow tents" in v4.36.0, as it introduced a regression (#2764, #2765). It will be restored in an upcoming release once we have found a solution to the bug.
- [cff.specializer] Fixed issue in charstring generalizer with the ``blend`` operator (#2750, #1975).
- [varLib.models] Added support for extrapolation (#2757).
- [ttGlyphSet] Ensure the newly added ``_TTVarGlyphSet`` inherits from ``_TTGlyphSet`` to keep backward compatibility with existing API (#2762).
- [kern] Allow compiling legacy kern tables with more than 64k entries (d21cfdede).
- [visitor] Added new visitor API to traverse tree of objects and dispatch based on the attribute type: cf. ``fontTools.misc.visitor`` and ``fontTools.ttLib.ttVisitor``. Added ``fontTools.ttLib.scaleUpem`` module that uses the latter to change a font's units-per-em and scale all the related fields accordingly (#2718, #2755).
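A minimal sketch of the ``scaleUpem`` module mentioned in 4.37.0 above (the
file names are illustrative):

.. code:: python

    from fontTools.ttLib import TTFont
    from fontTools.ttLib.scaleUpem import scale_upem

    font = TTFont("MyFont.ttf")
    scale_upem(font, 2048)  # e.g. rescale from 1000 to 2048 units per em
    font.save("MyFont-2048upem.ttf")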
4.36.0 (released 2022-08-17)
----------------------------

- [varLib.models] Use a simpler model that generates narrower "tents" (regions, master supports) whenever possible: specifically when any two axes that actively "cooperate" (have masters at non-zero positions for both axes) have a complete set of intermediates. The simpler algorithm produces fewer overlapping regions and behaves better with respect to rounding at the peak positions than the generic solver, always matching intermediate masters exactly, instead of maximally 0.5 units off. This may be useful when 100% metrics compatibility is desired (#2218, #2717).
- [feaLib] Remove warning about ``GDEF`` not being built when explicitly not requested; don't build one unconditionally even when not requested (#2744, also works around #2747).
- [ttFont] ``TTFont.getGlyphSet`` method now supports selecting a location that represents an instance of a variable font (supports both user-space and normalized axes coordinates via the ``normalized=False`` parameter). Currently this only works for TrueType-flavored variable fonts (#2738) (usage sketch below).

4.35.0 (released 2022-08-15)
----------------------------

- [otData/otConverters] Added support for 'biased' PaintSweepGradient start/end angles to match latest COLRv1 spec (#2743).
- [varLib.instancer] Fixed bug in ``_instantiateFeatureVariations`` when at the same time pinning one axis and restricting the range of a subsequent axis; the wrong axis tag was being used in the latter step (as the records' axisIdx was updated in the preceding step but looked up using the old axes order in the following step) (#2733, #2734).
- [mtiLib] Pad script tags with space when fewer than 4 characters long (#1727).
- [merge] Use ``'.'`` instead of ``'#'`` in duplicate glyph names (#2742).
- [gvar] Added support for lazily loading glyph variations (#2741).
- [varLib] In ``build_many``, we forgot to pass on the ``colr_layer_reuse`` parameter to the ``build`` method (#2730).
- [svgPathPen] Add a main that prints SVG for input text (6df779fd).
- [cffLib.width] Fixed off-by-one in optimized values; previous code didn't match the code block above it (2963fa50).
- [varLib.interpolatable] Support reading .designspace and .glyphs files (via optional ``glyphsLib``).
- Compile some modules with Cython when available and building/installing fonttools from source: ``varLib.iup`` (35% faster), ``pens.momentsPen`` (makes ``varLib.interpolatable`` 3x faster).
- [feaLib] Allow features to be built for VF without also building a GDEF table (e.g. only build GSUB); warn when GDEF would be needed but isn't requested (#2705, #2694).
- [otBase] Fixed ``AttributeError`` when uharfbuzz < 0.23.0 and 'repack' method is missing (32aa8eaf). Use new ``uharfbuzz.repack_with_tag`` when available (since uharfbuzz>=0.30.0), enables table-specific optimizations to be performed during repacking (#2724).
- [statisticsPen] By default report all glyphs (4139d891). Avoid division-by-zero (52b28f90).
- [feaLib] Added missing required argument to FeatureLibError exception (#2693).
- [varLib.merge] Fixed error during error reporting (#2689). Fixed undefined ``NotANone`` variable (#2714).

4.34.4 (released 2022-07-07)
----------------------------

- Fixed typo in varLib/merger.py that caused NameError merging COLR glyphs containing more than 255 layers (#2685).
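A minimal sketch of the ``TTFont.getGlyphSet`` change in 4.36.0 above: draw a
glyph at a specific user-space location of a (hypothetical) TrueType-flavored
variable font:

.. code:: python

    from fontTools.ttLib import TTFont
    from fontTools.pens.recordingPen import RecordingPen

    font = TTFont("MyVF.ttf")
    # user-space coordinates; pass normalized=True for -1..+1 coordinates
    glyphset = font.getGlyphSet(location={"wght": 600})
    pen = RecordingPen()
    glyphset["A"].draw(pen)
    print(pen.value)  # the recorded outline segments at wght=600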
4.34.3 (released 2022-07-07)
----------------------------

- [designspaceLib] Don't make up bad PS names when no STAT data (#2684).

4.34.2 (released 2022-07-06)
----------------------------

- [varStore/subset] Fixed KeyError exception to do with NO_VARIATION_INDEX while subsetting varidxes in GPOS/GDEF (a08140d).

4.34.1 (released 2022-07-06)
----------------------------

- [instancer] When optimizing HVAR/VVAR VarStore, use ``use_NO_VARIATION_INDEX=False`` to avoid including NO_VARIATION_INDEX in AdvWidthMap, RsbMap, LsbMap mappings, which would push the VarIdx width to the maximum (4 bytes), which is not desirable. This also fixes a hard crash when attempting to subset a varfont after it had been partially instanced with use_NO_VARIATION_INDEX=True.

4.34.0 (released 2022-07-06)
----------------------------

- [instancer] Set RIBBI bits in head and OS/2 table when cutting instances and the subfamily nameID=2 contains strings like 'Italic' or 'Bold' (#2673).
- [otTraverse] Added module containing methods for traversing trees of otData tables (#2660).
- [otTables] Made DeltaSetIndexMap TTX dump less verbose by omitting no-op entries (#2660).
- [colorLib.builder] Added option to disable PaintColrLayers's reuse of layers from LayerList (#2660).
- [varLib] Added support for merging multiple master COLRv1 tables into a variable COLR table (#2660, #2328). Base color glyphs of same name in different masters must have identical paint graph structure (incl. number of layers, palette indices, number of color line stops, corresponding paint formats at each level of the graph), but can differ in the variable fields (e.g. PaintSolid.Alpha). PaintVar* tables are produced when this happens and a VarStore/DeltaSetIndexMap is added to the variable COLR table. It is possible for non-default masters to be 'sparse', i.e. omit some of the color glyphs present in the default master.
- [feaLib] Let the Parser set nameIDs 1 through 6 that were previously reserved (#2675).
- [varLib.varStore] Support NO_VARIATION_INDEX in optimizer and instancer.
- [feaLib] Show all missing glyphs at once at end of parsing (#2665).
- [varLib.iup] Rewrite force-set conditions and limit DP loopback length (#2651). For Noto Sans, IUP time drops from 23s down to 9s, with only a slight size increase in the final font. This basically turns the algorithm from O(n^3) into O(n).
- [featureVars] Report about missing glyphs in substitution rules (#2654).
- [mutator/instancer] Added ``--no-recalc-timestamp`` CLI flag (#2649).
- [SVG] Allow individual SVG documents in the SVG OT table to be compressed or uncompressed, and remember that when roundtripping to/from ttx. The SVG.docList is now a list of SVGDocument namedtuple-like dataclasses containing an extra ``compressed`` field, and no longer a bare 3-tuple (#2645) (usage sketch below).
- [designspaceLib] Check for descriptor types with hasattr() to allow custom classes that don't inherit the default descriptors (#2634).
- [subset] Enable sharing across subtables of extension lookups for harfbuzz packing (#2626). Updated how table packing falls back to fontTools from harfbuzz (#2668).
- [subset] Updated default feature tags following current Harfbuzz (#2637).
- [svgLib] Fixed regex for real number to support e.g. 1e-4 in addition to 1.0e-4. Support parsing negative rx, ry on arc commands (#2596, #2611).
- [subset] Fixed subsetting SinglePosFormat2 when ValueFormat=0 (#2603).
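A minimal sketch of the SVG.docList change in 4.34.0 above, assuming the
``SVGDocument`` dataclass keeps its documented field order (the SVG body and
glyph IDs are illustrative):

.. code:: python

    from fontTools.ttLib import newTable
    from fontTools.ttLib.tables.S_V_G_ import SVGDocument

    svg = newTable("SVG ")
    doc = '<svg xmlns="http://www.w3.org/2000/svg"><g id="glyph1"/></svg>'
    # one document covering glyph ID 1, stored uncompressed
    svg.docList = [SVGDocument(doc, startGlyphID=1, endGlyphID=1, compressed=False)]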
4.33.3 (released 2022-04-26)
----------------------------

- [designspaceLib] Fixed typo in ``deepcopyExceptFonts`` method that prevented font references from being transferred (#2600). Fixed another typo in the name of ``Range`` dataclass's ``__post_init__`` magic method (#2597).

4.33.2 (released 2022-04-22)
----------------------------

- [otBase] Make logging less verbose when harfbuzz fails to serialize. Do not exit at the first failure but continue attempting to fix offset overflow errors using the pure-python serializer even when the ``USE_HARFBUZZ_REPACKER`` option was explicitly set to ``True``. This is normal with fonts with relatively large tables, at least until hb.repack implements proper table splitting.

4.33.1 (released 2022-04-22)
----------------------------

- [otlLib] Put back the ``FONTTOOLS_GPOS_COMPACT_MODE`` environment variable to fix regression in ufo2ft (and thus fontmake) introduced with v4.33.0 (#2592, #2593). This is deprecated and will be removed once ufo2ft is updated to use the new config setup.

4.33.0 (released 2022-04-21)
----------------------------

- [OS/2 / merge] Automatically recalculate ``OS/2.xAvgCharWidth`` after merging fonts with ``fontTools.merge`` (#2591, #2538).
- [misc/config] Added ``fontTools.misc.configTools`` module, a generic configuration system (#2416, #2439). Added ``fontTools.config`` module, a fontTools-specific configuration system using ``configTools`` above. Attached a ``Config`` object to ``TTFont``.
- [otlLib] Replaced environment variable for GPOS compression level with an equivalent option using the new config system.
- [designspaceLib] Incremented format version to 5.0 (#2436). Added discrete axes, variable fonts, STAT information, either design- or user-space location on instances. Added ``fontTools.designspaceLib.split`` module to split a designspace into sub-spaces that interpolate and that represent the variable fonts listed in the document. Made instance names optional and allow computing them from STAT data instead. Added ``fontTools.designspaceLib.statNames`` module. Allow instances to have the same location as a previously defined STAT label. Deprecated some attributes: ``SourceDescriptor``: ``copyLib``, ``copyInfo``, ``copyGroups``, ``copyFeatures``; ``InstanceDescriptor``: ``kerning``, ``info``; ``glyphs``: use rules or sparse sources; for both, ``location``: use the more explicit designLocation. Note: all are soft deprecations and existing code should keep working. Updated documentation for Python methods and the XML format.
- [varLib] Added ``build_many`` to build several variable fonts from a single designspace document (#2436). Added ``fontTools.varLib.stat`` module to build STAT tables from a designspace document.
- [otBase] Try to use the Harfbuzz Repacker for packing GSUB/GPOS tables when ``uharfbuzz`` python bindings are available (#2552). Disable it by setting the "fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER" config option to ``False``. If the option is set explicitly to ``True`` but ``uharfbuzz`` can't be imported or fails to serialize for any reason, an error will be raised (ImportError or uharfbuzz errors).
- [CFF/T2] Ensure that ``pen.closePath()`` gets called for CFF2 charstrings (#2577). Handle implicit CFF2 closePath within ``T2OutlineExtractor`` (#2580).

4.32.0 (released 2022-04-08)
----------------------------

- [otlLib] Disable GPOS7 optimization to work around bug in Apple CoreText. Always force Chaining GPOS8 for now (#2540).
- [glifLib] Added ``outputImpliedClosingLine=False`` parameter to ``Glyph.draw()``, to control behaviour of ``PointToSegmentPen`` (6b4e2e7).
- [varLib.interpolatable] Check for wrong contour starting point (#2571).
- [cffLib] Remove leftover ``GlobalState`` class and fix calls to ``TopDictIndex()`` (#2569, #2570).
- [instancer] Clear ``AxisValueArray`` if it is empty after instantiating (#2563).

4.31.2 (released 2022-03-22)
----------------------------

- [varLib] Fix instantiation of GPOS SinglePos values (#2555).

4.31.1 (released 2022-03-18)
----------------------------

- [subset] Fix subsetting OT-SVG when the glyph id attribute is on the root ``<svg>`` element (#2553).

4.31.0 (released 2022-03-18)
----------------------------

- [ttCollection] Fixed 'ResourceWarning: unclosed file' warning (#2549).
- [varLib.merger] Handle merging SinglePos with valueformat=0 (#2550).
- [ttFont] Update glyf's glyphOrder when calling TTFont.setGlyphOrder() (#2544).
- [ttFont] Added ``ensureDecompiled`` method to load all tables irrespective of the ``lazy`` attribute (#2551).
- [otBase] Added ``iterSubTable`` method to iterate over BaseTable's children of type BaseTable; useful for traversing a tree of otTables (#2551).

4.30.0 (released 2022-03-10)
----------------------------

- [varLib] Added debug logger showing the glyph name for which ``gvar`` is built (#2542).
- [varLib.errors] Fixed undefined names in ``FoundANone`` and ``UnsupportedFormat`` exceptions (ac4d5611).
- [otlLib.builder] Added ``windowsNames`` and ``macNames`` (bool) parameters to the ``buildStatTable`` function, so that one can select whether to only add one or both of the two sets (#2528).
- [t1Lib] Added the ability to recreate PostScript stream (#2504).
- [name] Added ``getFirstDebugName``, ``getBest{Family,SubFamily,Full}Name`` methods (#2526).

4.29.1 (released 2022-02-01)
----------------------------

- [colorLib] Fixed rounding issue with radial gradient's start/end circles inside one another (#2521).
- [freetypePen] Handle rotate/skew transform when auto-computing width/height of the buffer; raise PenError when missing moveTo (#2517).

4.29.0 (released 2022-01-24)
----------------------------

- [ufoLib] Fixed illegal characters and expanded reserved filenames (#2506).
- [COLRv1] Don't emit useless PaintColrLayers of length=1 in LayerListBuilder (#2513).
- [ttx] Removed legacy ``waitForKeyPress`` method on Windows (#2509).
- [pens] Added FreeTypePen that uses ``freetype-py`` and the pen protocol for rasterizing outline paths (#2494).
- [unicodedata] Updated the script direction list to Unicode 14.0 (#2484). Bumped unicodedata2 dependency to 14.0 (#2499).
- [psLib] Fixed type of ``fontName`` in ``suckfont`` (#2496).

4.28.5 (released 2021-12-19)
----------------------------

- [svgPathPen] Continuation of #2471: make sure all occurrences of ``str()`` are now replaced with the user-defined ``ntos`` callable.
- [merge] Refactored code into submodules, plus several bugfixes and improvements: fixed duplicate-glyph-resolution GSUB-lookup generation code; use tolerance in glyph comparison for empty glyph's width; ignore space of default ignorable glyphs; downgrade duplicates-resolution missing-GSUB from assert to warn; added --drop-tables option (#2473, #2475, #2476).

4.28.4 (released 2021-12-15)
----------------------------

- [merge] Merge GDEF marksets in Lookups properly (#2474).
- [feaLib] Have ``fontTools feaLib`` script exit with error code when build fails (#2459).
- [svgPathPen] Added ``ntos`` option to customize number formatting (e.g.
  rounding) (#2471).
- [subset] Speed up subsetting of large CFF fonts (#2467).
- [otTables] Speculatively promote lookups to extension to speed up compilation: if the offset to lookup N is too big to fit in a ushort, the offset to lookup N+1 is going to be too big as well, so we promote all lookups from lookup N onwards to extension (#2465).

4.28.3 (released 2021-12-03)
----------------------------

- [subset] Fixed bug while subsetting the ``COLR`` table, whereby incomplete layer records pointing to missing glyphs were being retained, leading to a ``struct.error`` upon compiling. Make it so that the ``glyf`` glyph closure, which follows the ``COLR`` glyph closure, does not influence the ``COLR`` table subsetting (#2461, #2462).
- [docs] Fully document the ``cmap`` and ``glyf`` tables (#2454, #2457).
- [colorLib.unbuilder] Fixed CLI by deleting a no-longer-existing parameter (180bb1867).

4.28.2 (released 2021-11-22)
----------------------------

- [otlLib] Remove duplicates when building coverage (#2433).
- [docs] Add interrogate configuration (#2443).
- [docs] Remove comment about missing “start” optional argument to ``calcChecksum`` (#2448).
- [cu2qu/cli] Adapt to the latest ufoLib2.
- [subset] Support subsetting the SVG table and remove it from the list of drop-by-default tables (#534).
- [subset] Add ``--pretty-svg`` option to pretty-print SVG table contents (#2452).
- [merge] Support merging ``CFF`` tables (CID-keyed ``CFF`` is still not supported) (#2447).
- [merge] Support ``--output-file`` (#2447).
- [docs] Split table docs into individual pages (#2444).
- [feaLib] Forbid empty classes (#2446).
- [docs] Improve documentation for ``fontTools.ttLib.ttFont`` (#2442).

4.28.1 (released 2021-11-08)
----------------------------

- [subset] Fixed AttributeError while traversing a color glyph's Paint graph when there is no LayerList, which is optional (#2441).

4.28.0 (released 2021-11-05)
----------------------------

- Dropped support for EOL Python 3.6; require Python 3.7 (#2417).
- [ufoLib/glifLib] Make filename-clash checks faster by using a set instead of a list (#2422).
- [subset] Don't crash if the optional ClipList and LayerList are ``None`` (empty) (#2424, #2439).
- [OT-SVG] Removed support for the old deprecated version 1 and embedded color palettes, which were never officially part of the OpenType SVG spec. Upon compile, reuse offsets to SVG documents that are identical (#2430).
- [feaLib] Added support for Variable Feature File syntax. This is experimental and subject to change until it is finalized in the Adobe FEA spec (#2432).
- [unicodedata] Update Scripts/ScriptExtensions/Blocks to UnicodeData 14.0 (#2437).

4.27.1 (released 2021-09-23)
----------------------------

- [otlLib] Fixed error when a chained contextual lookup builder overflows (#2404, #2411).
- [bezierTools] Fixed two floating-point bugs: one when computing `t` for a point lying on an almost horizontal/vertical line; another when computing the intersection point between a curve and a line (#2413).

4.27.0 (released 2021-09-14)
----------------------------

- [ttLib/otTables] Cleaned up virtual GID handling: allow virtual GIDs in ``Coverage`` and ``ClassDef`` readers; removed the unused ``allowVID`` argument from the ``TTFont`` constructor, and the ``requireReal`` argument of the ``TTFont.getGlyphID`` method. Make ``TTFont.setGlyphOrder`` clear the reverse glyphOrder map, and assume the ``glyphOrder`` internal attribute is never modified outside setGlyphOrder; added ``TTFont.getGlyphNameMany`` and ``getGlyphIDMany`` (see the sketch below) (#1536, #1654, #2334, #2398).
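A quick sketch of the batch glyph-name APIs mentioned in the 4.27.0 entry above (assumed behaviour: both take a sequence and return a list, mirroring ``getGlyphName``/``getGlyphID``):

.. code:: python

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")  # placeholder path
    names = font.getGlyphNameMany([1, 2, 3])  # names for those glyph IDs
    gids = font.getGlyphIDMany(names)         # round-trips back to [1, 2, 3]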
- [py23] Dropped internal use of the ``fontTools.py23`` module to fix deprecation warnings in client code that imports from fontTools (#2234, #2399, #2400).
- [subset] Fix subsetting COLRv1 clip boxes when the font is loaded lazily (#2408).

4.26.2 (released 2021-08-09)
----------------------------

- [otTables] Added missing ``CompositeMode.PLUS`` operator (#2390).

4.26.1 (released 2021-08-03)
----------------------------

- [transform] Added ``transformVector`` and ``transformVectors`` methods to the ``Transform`` class. Similar to ``transformPoint``, but they ignore the translation part (#2386).

4.26.0 (released 2021-08-03)
----------------------------

- [xmlWriter] Default to ``"\n"`` for ``newlinestr`` instead of the platform-specific ``os.linesep`` (#2384).
- [otData] Define COLRv1 ClipList and ClipBox (#2379).
- [removeOverlaps/instancer] Added --ignore-overlap-errors option to work around a Skia PathOps.Simplify bug (#2382, #2363, google/fonts#3365).
- NOTE: This will be the last version to support Python 3.6. FontTools will require Python 3.7 or above from the next release (#2350).

4.25.2 (released 2021-07-26)
----------------------------

- [COLRv1] Various changes to sync with the latest COLRv1 draft spec. In particular: define COLR.VarIndexMap; remove/inline the ColorIndex struct; add VarIndexBase to ``PaintVar*`` tables (#2372); add reduced-precision specialized transform Paints; define Angle as a fraction of a half circle encoded as F2Dot14; use FWORD (int16) for all Paint center coordinates; change PaintTransform to have an offset to Affine2x3.
- [ttLib] When importing XML, only set sfntVersion if the font has no reader and is empty (#2376).

4.25.1 (released 2021-07-16)
----------------------------

- [ttGlyphPen] Fixed bug in ``TTGlyphPointPen``, whereby open contours (i.e. starting with segmentType "move") would throw ``NotImplementedError``. They are now treated as if they are closed, like with the ``TTGlyphPen`` (#2364, #2366).

4.25.0 (released 2021-07-05)
----------------------------

- [tfmLib] Added a new library for parsing TeX Font Metric (TFM) files (#2354).
- [TupleVariation] Make shared tuples order deterministic on Python < 3.7, where Counter (a subclass of dict) doesn't remember insertion order (#2351, #2353).
- [otData] Renamed COLRv1 structs to remove the 'v1' suffix and match the updated draft spec: 'LayerV1List' -> 'LayerList', 'BaseGlyphV1List' -> 'BaseGlyphList', 'BaseGlyphV1Record' -> 'BaseGlyphPaintRecord' (#2346). Added 8 new ``PaintScale*`` tables: with/without centers, uniform vs non-uniform. Added ``*AroundCenter`` variants to ``PaintRotate`` and ``PaintSkew``: the default versions no longer have centerX/Y, but default to the origin. ``PaintRotate``, ``PaintSkew`` and ``PaintComposite`` formats were re-numbered. NOTE: these are breaking changes; clients using the experimental COLRv1 API will have to be updated (#2348).
- [pointPens] Allow ``GuessSmoothPointPen`` to accept a tolerance. Fixed a call to ``math.atan2`` with the x/y parameters inverted. Sync the code with fontPens (#2344).
- [post] Fixed parsing ``post`` table format 2.0 when it contains extra garbage at the end of the stringData array (#2314).
- [subset] Drop empty features unless it's 'size' with a FeatureParams table (#2324).
- [otlLib] Added ``otlLib.optimize`` module; added GPOS compaction algorithm. The compaction can be run on existing fonts with ``fonttools otlLib.optimize`` or using the snippet ``compact_gpos.py``.
  There's experimental support for compacting fonts at compilation time using an environment variable, but that might be removed later (#2326).

4.24.4 (released 2021-05-25)
----------------------------

- [subset/instancer] Fixed ``AttributeError`` when instantiating a VF that contains GPOS ValueRecords with ``Device`` tables but without the respective non-Device values (e.g. ``XAdvDevice`` without ``XAdvance``). When not explicitly set, the latter are assumed to be 0 (#2323).

4.24.3 (released 2021-05-20)
----------------------------

- [otTables] Fixed ``AttributeError`` in methods that split LigatureSubst, MultipleSubst and AlternateSubst subtables when an offset overflow occurs. The ``Format`` attribute was removed in v4.22.0 (#2319).

4.24.2 (released 2021-05-20)
----------------------------

- [ttGlyphPen] Fixed typing annotation of the TTGlyphPen glyphSet parameter (#2315).
- Fixed two instances of "DeprecationWarning: invalid escape sequence" (#2311).

4.24.1 (released 2021-05-20)
----------------------------

- [subset] Fixed AttributeError when a SinglePos subtable has a None Value (ValueFormat 0) (#2312, #2313).

4.24.0 (released 2021-05-17)
----------------------------

- [pens] Added ``ttGlyphPen.TTGlyphPointPen``, similar to ``TTGlyphPen`` but using the PointPen protocol (#2205); see the sketch below.

4.23.1 (released 2021-05-14)
----------------------------

- [subset] Fix ``KeyError`` after subsetting a ``COLR`` table that initially contains both v0 and v1 color glyphs when the subset only requested v1 glyphs; we were not pruning the v0 portion of the table (#2308).
- [colorLib] Set the ``LayerV1List`` attribute to ``None`` when empty; it's optional in COLRv1 (#2308).

4.23.0 (released 2021-05-13)
----------------------------

- [designspaceLib] Allow using ``\\UNC`` absolute paths on Windows (#2299, #2306).
- [varLib.merger] Fixed bug where ``VarLibMergeError`` was raised with incorrect parameters (#2300).
- [feaLib] Allow substituting a glyph class with ``NULL`` to delete multiple glyphs (#2303).
- [glyf] Fixed ``NameError`` exception in ``getPhantomPoints`` (#2295, #2305).
- [removeOverlaps] Retry pathops.simplify after rounding path coordinates to integers if it fails the first time using floats, to work around a rare and hard-to-debug Skia bug (#2288).
- [varLib] Added support for building, reading, writing and optimizing 32-bit ``ItemVariationStore`` as used in the COLRv1 table (#2285).
- [otBase/otConverters] Add array readers/writers for int types (#2285).
- [feaLib] Allow more than one lookahead glyph/class in contextual positioning with "value at end" (#2293, #2294).
- [COLRv1] The default varIdx should be 0xFFFFFFFF (#2297, #2298).
- [pens] Make RecordingPointPen actually pass on identifiers; replace asserts with explicit ``PenError`` exceptions (#2284).
- [mutator] Round lsb for CFF2 fonts as well (#2286).

4.22.1 (released 2021-04-26)
----------------------------

- [feaLib] Skip references to named lookups if the lookup block definition is empty, similarly to makeotf. This also fixes an ``AttributeError`` while generating the ``aalt`` feature (#2276, #2277).
- [subset] Fixed bug in the ``--no-hinting`` implementation for Device tables (#2272, #2275). The previous code was always dropping Device tables if no-hinting was requested, but some Device tables (DeltaFormat=0x8000) are also used to encode variation indices and need to be retained.
- [otBase] Fixed bug in getting the ValueRecordSize when decompiling the ``MVAR`` table with ``lazy=True`` (#2273, #2274).
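A short sketch of the ``TTGlyphPointPen`` added in 4.24.0 above, drawing one closed box contour via the PointPen protocol (coordinates are arbitrary):

.. code:: python

    from fontTools.pens.ttGlyphPen import TTGlyphPointPen

    pen = TTGlyphPointPen(None)  # a glyphSet is only needed for components
    pen.beginPath()
    pen.addPoint((100, 0), segmentType="line")
    pen.addPoint((100, 700), segmentType="line")
    pen.addPoint((500, 700), segmentType="line")
    pen.addPoint((500, 0), segmentType="line")
    pen.endPath()
    glyph = pen.glyph()  # a glyf-table Glyph, as with TTGlyphPen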
- [varLib/glyf/gvar] Optimized and simplified the ``GlyphCoordinates`` and ``TupleVariation`` classes, use ``bytearray`` where possible, refactored phantom-points calculations. We measured about 30% speedup in the total time of loading master ttfs, building gvar, and saving (#2261, #2266).
- [subset] Fixed ``AssertionError`` while pruning unused CPAL palettes when ``0xFFFF`` is present (#2257, #2259).

4.22.0 (released 2021-04-01)
----------------------------

- [ttLib] Remove .Format from Coverage, ClassDef, SingleSubst, LigatureSubst, AlternateSubst, MultipleSubst (#2238). ATTENTION: This will change your TTX dumps!
- [misc.arrayTools] Move Vector to its own submodule, and rewrite it as a tuple subclass (#2201).
- [docs] Added a terminology section for varLib (#2209).
- [varLib] Move rounding to VariationModel, to avoid error accumulation from multiple deltas (#2214).
- [varLib] Explain merge errors in more human-friendly terms (#2223, #2226).
- [otlLib] Correct some documentation (#2225).
- [varLib/otlLib] Allow merging into a VariationFont without first saving GPOS PairPos2 (#2229).
- [subset] Improve PairPosFormat2 subsetting (#2221).
- [ttLib] TTFont.save: create the file on disk as late as possible (#2253).
- [cffLib] Add missing CFF2 dict operators LanguageGroup and ExpansionFactor (#2249). ATTENTION: This will change your TTX dumps!

4.21.1 (released 2021-02-26)
----------------------------

- [pens] Reverted a breaking change that turned ``AbstractPen`` and ``AbstractPointPen`` into abstract base classes (#2164, #2198).

4.21.0 (released 2021-02-26)
----------------------------

- [feaLib] Indent anchor statements in ``asFea()`` to make them more legible and diff-able (#2193).
- [pens] Turn ``AbstractPen`` and ``AbstractPointPen`` into abstract base classes (#2164).
- [feaLib] Added support for parsing and building the ``STAT`` table from AFDKO feature files (#2039).
- [instancer] Added option to update the name table of a generated instance using the ``STAT`` table's axis values (#2189).
- [bezierTools] Added functions to compute bezier point-at-time, as well as line-line, curve-line and curve-curve intersections (#2192); see the sketch below.

4.20.0 (released 2021-02-15)
----------------------------

- [COLRv1] Added ``unbuildColrV1`` to deconstruct COLRv1 otTables into a raw json-able data structure; it does the reverse of ``buildColrV1`` (#2171).
- [feaLib] Allow a ``sub X by NULL`` sequence to delete a glyph (#2170).
- [arrayTools] Fixed ``Vector`` division (#2173).
- [COLRv1] Define new ``PaintSweepGradient`` (#2172).
- [otTables] Moved the ``Paint.Format`` enum class outside of the ``Paint`` class definition, now named ``PaintFormat``. It was clashing with the paint instance ``Format`` attribute and thus was breaking lazy loading of the COLR table, which relies on the magic ``__getattr__`` (#2175).
- [COLRv1] Replace hand-coded builder functions with an otData-driven dynamic implementation (#2181).
- [COLRv1] Define additional static (non-variable) Paint formats (#2181).
- [subset] Added support for subsetting COLR v1 and CPAL tables (#2174, #2177).
- [fontBuilder] Allow ``setupFvar`` to optionally take ``designspaceLib.AxisDescriptor`` objects. Added new ``setupAvar`` method. Support localised names for axes and named instances (#2185).

4.19.1 (released 2021-01-28)
----------------------------

- [woff2] An initial off-curve point with an overlap flag now stays an off-curve point after compression.

4.19.0 (released 2021-01-25)
----------------------------

- [codecs] Handle an ``errors`` parameter different from 'strict' for the custom extended mac encodings (#2137, #2132).
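A small sketch of the intersection helpers from the 4.21.0 entry above; the function name follows ``fontTools.misc.bezierTools``, but treat the exact signature and return shape as assumptions to verify against your installed version:

.. code:: python

    from fontTools.misc.bezierTools import lineLineIntersections

    # Two crossing diagonals of a 100x100 box:
    hits = lineLineIntersections((0, 0), (100, 100), (0, 100), (100, 0))
    for hit in hits:
        # each hit carries the intersection point and the parametric
        # times on the first and second segment
        print(hit.pt, hit.t1, hit.t2)  # e.g. (50.0, 50.0) 0.5 0.5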
- [featureVars] Raise a better error message when a script is missing the required default language system (#2154).
- [COLRv1] Avoid abrupt change caused by rounding ``PaintRadialGradient.c0`` when the start circle almost touches the end circle's perimeter (#2148).
- [COLRv1] Support building unlimited lists of paints as 255-ary trees of ``PaintColrLayers`` tables (#2153).
- [subset] Prune redundant format-12 cmap subtables when all non-BMP characters are dropped (#2146).
- [basePen] Raise ``MissingComponentError`` instead of a bare ``KeyError`` when a referenced component is missing (#2145).

4.18.2 (released 2020-12-16)
----------------------------

- [COLRv1] Implemented ``PaintTranslate`` paint format (#2129).
- [varLib.cff] Fixed unbound local variable error (#1787).
- [otlLib] Don't crash when creating OpenType class definitions if some glyphs occur more than once (#2125).

4.18.1 (released 2020-12-09)
----------------------------

- [colorLib] Speed optimization for ``LayerV1ListBuilder`` (#2119).
- [mutator] Fixed missing tab in ``interpolate_cff2_metrics`` (0957dc7a).

4.18.0 (released 2020-12-04)
----------------------------

- [COLRv1] Update to the latest draft: added ``PaintRotate`` and ``PaintSkew`` (#2118).
- [woff2] Support the new ``brotlicffi`` bindings for PyPy (#2117).
- [glifLib] Added ``expectContentsFile`` parameter to ``GlyphSet``, for use when reading existing UFOs, to comply with the specification stating that a ``contents.plist`` file must exist in a glyph set (#2114).
- [subset] Allow ``LangSys`` tags in the ``--layout-scripts`` option (#2112). For example: ``--layout-scripts=arab.dflt,arab.URD,latn``; this will keep ``DefaultLangSys`` and the ``URD`` language for the ``arab`` script, and all languages for the ``latn`` script.
- [varLib.interpolatable] Allow UFOs to be checked; report open paths and non-existent glyphs; add a ``--json`` option to produce a machine-readable list of incompatibilities.
- [pens] Added ``QuartzPen`` to create ``CGPath`` from glyph outlines on macOS. Requires pyobjc (#2107).
- [feaLib] You can export ``FONTTOOLS_LOOKUP_DEBUGGING=1`` to enable feature-file debugging info to be stored in the ``Debg`` table (#2106).
- [otlLib] Build more efficient format 1 and format 2 contextual lookups whenever possible (#2101).

4.17.1 (released 2020-11-16)
----------------------------

- [colorLib] Fixed regression in 4.17.0 when building the COLR v0 table; when color layers are stored in the UFO lib plist, we can't distinguish tuples from lists, so we need to accept either type (e5439eb9, googlefonts/ufo2ft/issues#426).

4.17.0 (released 2020-11-12)
----------------------------

- [colorLib/otData] Updated to the latest draft ``COLR`` v1 spec (#2092).
- [svgLib] Fixed parsing error when arc commands' boolean flags are not separated by space or comma (#2094).
- [varLib] Interpret empty non-default glyphs as 'missing', if the default glyph is not empty (#2082).
- [feaLib.builder] Only stash the lookup location for ``Debg`` if ``Builder.buildLookups_`` has cooperated (#2065, #2067).
- [varLib] Fixed bug in the VarStore optimizer (#2073, #2083).
- [varLib] Add a designspace lib key for a custom feavar feature tag (#2080).
- Added HashPointPen, adapted from psautohint. With this pen, a hash value of a glyph can be computed, which can later be used to detect glyph changes (#2005); see the sketch below.
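A sketch of the HashPointPen workflow from the 4.17.0 entry above; the constructor arguments and the ``hash`` attribute follow the pen's docstring, and the font path is a placeholder:

.. code:: python

    from fontTools.pens.hashPointPen import HashPointPen
    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")  # placeholder path
    glyph_set = font.getGlyphSet()
    glyph = glyph_set["A"]
    pen = HashPointPen(glyph.width, glyph_set)
    glyph.drawPoints(pen)
    print(pen.hash)  # stable digest; changes whenever the outline changes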
4.16.1 (released 2020-10-05)
----------------------------

- [varLib.instancer] Fixed ``TypeError`` exception when instantiating a VF with a GSUB table 1.1 in which the ``FeatureVariations`` attribute is present but set to ``None`` -- indicating that the optional ``FeatureVariations`` is missing (#2077).
- [glifLib] Make the ``x`` and ``y`` attributes of the ``point`` element required even when validation is turned off, and raise a meaningful ``GlifLibError`` message when that happens (#2075).

4.16.0 (released 2020-09-30)
----------------------------

- [removeOverlaps] Added a new module and ``removeOverlaps`` function that merges overlapping contours and components in TrueType glyphs. It requires the `skia-pathops <https://github.com/fonttools/skia-pathops>`__ module. Note that removing overlaps invalidates the TrueType hinting (#2068).
- [varLib.instancer] Added ``--remove-overlaps`` command-line option. The ``overlap`` option in ``instantiateVariableFont`` now takes an ``OverlapMode`` enum: 0: KEEP_AND_DONT_SET_FLAGS, 1: KEEP_AND_SET_FLAGS (default), and 2: REMOVE. The latter is equivalent to calling ``removeOverlaps`` on the generated static instance. The option continues to accept ``bool`` values for backward compatibility.

4.15.0 (released 2020-09-21)
----------------------------

- [plistlib] Added typing annotations to the plistlib module. Set up the mypy static typechecker to run automatically on CI (#2061).
- [ttLib] Implement the private ``Debg`` table, a reverse-DNS namespaced JSON dict.
- [feaLib] Optionally add an entry into the ``Debg`` table with the original lookup name (if any), feature name / script / language combination (if any), and original source filename and line location. Annotate the ttx output for a lookup with the information from the Debg table (#2052).
- [sfnt] Disabled checksum checking by default in ``SFNTReader`` (#2058).
- [Docs] Document the ``mtiLib`` module (#2027).
- [varLib.interpolatable] Added checks for contour node count and the operation type of each node (#2054).
- [ttLib] Added API to register custom table packer/unpacker classes (#2055); see the sketch below.

4.14.0 (released 2020-08-19)
----------------------------

- [feaLib] Allow anonymous classes in LookupFlags definitions (#2037).
- [Docs] Better document DesignSpace rules processing order (#2041).
- [ttLib] Fixed a 21-year-old bug in the ``maxp.maxComponentDepth`` calculation (#2044, #2045).
- [varLib.models] Fixed a misspelled argument name in the CLI entry point (81d0042a).
- [subset] When subsetting GSUB v1.1, fixed a TypeError by checking whether the optional FeatureVariations table is present (e63ecc5b).
- [Snippets] Added a snippet to show how to decompose glyphs in a TTF (#2030).
- [otlLib] Generate GSUB type 5 and GPOS type 7 contextual lookups where appropriate (#2016).

4.13.0 (released 2020-07-10)
----------------------------

- [feaLib/otlLib] Moved lookup subtable builders from feaLib to otlLib; refactored some common code (#2004, #2007).
- [docs] Document the otlLib module (#2009).
- [glifLib] Fixed bug with some UFO .glif filenames clashing on case-insensitive filesystems (#2001, #2002).
- [colorLib] Updated COLRv1 implementation following changes in the draft spec (#2008, googlefonts/colr-gradients-spec#24).

4.12.1 (released 2020-06-16)
----------------------------

- [_n_a_m_e] Fixed error in ``addMultilingualName`` with one-character names. Only attempt to recover malformed UTF-16 data from a ``bytes`` string, not from unicode ``str`` (#1997, #1998).
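A sketch of the custom table-class registration API from the 4.15.0 entry above; the tag, module and class names here are hypothetical, and the import location is an assumption (the function is defined in ``fontTools.ttLib.ttFont``):

.. code:: python

    from fontTools.ttLib import TTFont
    from fontTools.ttLib.ttFont import registerCustomTableClass

    # Register a handler for a private table before opening fonts that
    # use it; "CUST", "mymodule" and "CustomTable" are placeholders.
    registerCustomTableClass("CUST", "mymodule", "CustomTable")

    font = TTFont("MyFont.ttf")  # placeholder path
    table = font["CUST"]  # decompiled via mymodule.CustomTable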
4.12.0 (released 2020-06-09)
----------------------------

- [otlLib/varLib] Ensure that the ``AxisNameID`` in the ``STAT`` and ``fvar`` tables is greater than 255, as per the OpenType spec (#1985, #1986).
- [docs] Document more modules in the ``fontTools.misc`` package: ``filenames``, ``fixedTools``, ``intTools``, ``loggingTools``, ``macCreatorType``, ``macRes``, ``plistlib`` (#1981).
- [OS/2] Don't calculate whole sets of unicode codepoints; use faster and more memory-efficient ranges and bisect lookups (#1984).
- [voltLib] Support writing back the abstract syntax tree as VOLT data (#1983).
- [voltLib] Accept the DO_NOT_TOUCH_CMAP keyword (#1987).
- [subset/merge] Fixed a namespace clash involving a private helper class (#1955).

4.11.0 (released 2020-05-28)
----------------------------

- [feaLib] Introduced ``includeDir`` parameter on Parser and IncludingLexer to explicitly specify the directory to search when ``include()`` statements are encountered (#1973).
- [ufoLib] Silently delete duplicate glyphs within the same kerning group when reading groups (#1970).
- [ttLib] Set the version of the COLR table when decompiling COLRv1 (commit 9d8a7e2).

4.10.2 (released 2020-05-20)
----------------------------

- [sfnt] Fixed ``NameError: SimpleNamespace`` while reading the TTC header. The regression was introduced with 4.10.1 after removing the ``py23`` star import.

4.10.1 (released 2020-05-19)
----------------------------

- [sfnt] Make ``SFNTReader`` pickleable even when the TTFont is loaded with the lazy=True option and thus keeps a reference to an external file (#1962, #1967).
- [feaLib.ast] Restore backward compatibility (broken in 4.10 with #1905) for the ``ChainContextPosStatement`` and ``ChainContextSubstStatement`` classes. Make them accept either a list of lookups or a list of lists of lookups (#1961).
- [docs] Document some modules in the ``fontTools.misc`` package: ``arrayTools``, ``bezierTools``, ``cliTools`` and ``eexec`` (#1956).
- [ttLib._n_a_m_e] Fixed ``findMultilingualName()`` when a name record's ``string`` is encoded as a bytes sequence (#1963).

4.10.0 (released 2020-05-15)
----------------------------

- [varLib] Allow feature variations to be active across the entire space (#1957).
- [ufoLib] Added support for ``formatVersionMinor`` in UFO's ``fontinfo.plist`` and for the ``formatMinor`` attribute in GLIF files, as discussed in unified-font-object/ufo-spec#78. No changes in reading or writing UFOs until an upcoming (non-0) minor update of the UFO specification is published (#1786).
- [merge] Fixed merging fonts with different versions of the ``OS/2`` table (#1865, #1952).
- [subset] Fixed ``AttributeError`` while subsetting ``ContextSubst`` and ``ContextPos`` Format 3 subtables (#1879, #1944).
- [ttLib.table._m_e_t_a] If data happens to be ASCII, emit a comment in TTX (#1938).
- [feaLib] Support multiple lookups per glyph position (#1905).
- [psCharStrings] Use inheritance to avoid repeated code in the initializer (#1932).
- [Doc] Improved documentation for the following modules: ``afmLib`` (#1933), ``agl`` (#1934), ``cffLib`` (#1935), ``cu2qu`` (#1937), ``encodings`` (#1940), ``feaLib`` (#1941), ``merge`` (#1949).
- [Doc] Split off developer-centric info to a new page, making the front page of the docs more user-focused. List all utilities and sub-modules with brief descriptions. Make the README more concise and focused (#1914).
- [otlLib] Add a function to build the STAT table from a high-level description (#1926).
- [ttLib._n_a_m_e] Add ``findMultilingualName()`` method (#1921); see the sketch below.
- [unicodedata] Update ``RTL_SCRIPTS`` for Unicode 13.0 (#1925).
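A sketch combining ``findMultilingualName()`` (added in 4.10.0 above) with the pre-existing ``addMultilingualName()``; the keyword defaults are assumptions, and the names are placeholders:

.. code:: python

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")  # placeholder path
    name_table = font["name"]
    names = {"en": "Example Sans", "nl": "Voorbeeld Sans"}
    nameID = name_table.findMultilingualName(names)  # existing ID or None
    if nameID is None:
        # allocates a new user name ID and adds one record per language
        nameID = name_table.addMultilingualName(names, ttFont=font)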
- [gvar] Sort ``gvar`` XML output by glyph name, not glyph order (#1907, #1908).
- [Doc] Added help options to the ``fonttools`` command-line tool (#1913, #1920). Ensure all fonttools CLI tools have help documentation (#1948).
- [ufoLib] Only write fontinfo.plist when there actually is content (#1911).

4.9.0 (released 2020-04-29)
---------------------------

- [subset] Fixed subsetting of the FeatureVariations table. The subsetter no longer drops FeatureVariationRecords that have empty substitutions, as that would keep the search going and thus change the logic. It will only drop empty records that occur at the end of the FeatureVariationRecords array (#1881).
- [subset] Remove the FeatureVariations table and downgrade GSUB/GPOS to version 0x10000 when FeatureVariations contain no FeatureVariationRecords after subsetting (#1903).
- [agl] Add support for the legacy Adobe Glyph List of glyph names in ``fontTools.agl`` (#1895).
- [feaLib] Ignore superfluous script statements (#1883).
- [feaLib] Hide the traceback by default on the ``fonttools feaLib`` command line. Use the ``--traceback`` option to show it (#1898).
- [feaLib] Check the lookup index in chaining sub/pos lookups and print a better error message (#1896, #1897).
- [feaLib] Fix building chained alternate substitutions (#1902).
- [Doc] Included all fontTools modules in the sphinx-generated documentation, and published it to ReadTheDocs for continuous documentation of the fontTools project (#1333). Check it out at https://fonttools.readthedocs.io/. Thanks to Chris Simpkins!
- [transform] The ``Transform`` class is now a subclass of ``typing.NamedTuple``. No change in functionality (#1904).

4.8.1 (released 2020-04-17)
---------------------------

- [feaLib] Fixed ``AttributeError: 'NoneType' has no attribute 'getAlternateGlyphs'`` when the ``aalt`` feature references a chain contextual substitution lookup (googlefonts/fontmake#648, #1878).

4.8.0 (released 2020-04-16)
---------------------------

- [feaLib] If the Parser is initialized without a ``glyphNames`` parameter, it cannot distinguish between a glyph name containing a hyphen and a range of glyph names; instead of raising an error, it now interprets them as literal glyph names, while also outputting a logging warning to alert the user about the ambiguity (#1768, #1870).
- [feaLib] When serializing AST to string, emit spaces around hyphens that denote ranges. Also, fixed an issue with CID ranges when round-tripping AST->string->AST (#1872).
- [Snippets/otf2ttf] In the otf2ttf.py script, update LSB in hmtx to match xMin (#1873).
- [colorLib] Added experimental support for building ``COLR`` v1 tables as per the `colr-gradients-spec <https://github.com/googlefonts/colr-gradients-spec/blob/main/colr-gradients-spec.md>`__ draft proposal. **NOTE**: both the API and the XML dump of ``COLR`` v1 are susceptible to change while the proposal is being discussed and formalized (#1822).

4.7.0 (released 2020-04-03)
---------------------------

- [cu2qu] Added ``fontTools.cu2qu`` package, imported from the original `cu2qu <https://github.com/googlefonts/cu2qu>`__ project. The ``cu2qu.pens`` module was moved to ``fontTools.pens.cu2quPen``. The optional cu2qu extension module can be compiled by installing `Cython <https://cython.org/>`__ before installing fonttools from source (i.e. git repo or sdist tarball). The wheel package that is published on PyPI (i.e. the one ``pip`` downloads, unless the ``--no-binary`` option is used) will continue to be pure-Python for now (#1868).
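A minimal sketch of the vendored cu2qu conversion API from the 4.7.0 entry above, assuming the same public function as the original project:

.. code:: python

    from fontTools.cu2qu import curve_to_quadratic

    # One cubic Bezier (four control points), converted to a quadratic
    # spline with at most 1 font unit of error:
    cubic = [(0, 0), (0, 100), (100, 100), (100, 0)]
    quad_spline = curve_to_quadratic(cubic, 1.0)
    print(quad_spline)  # start point, off-curve points..., end point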
4.6.0 (released 2020-03-24)
---------------------------

- [varLib] Added support for building variable ``BASE`` table version 1.1 (#1858).
- [CPAL] Added ``fromRGBA`` method to the ``Color`` class (#1861).

4.5.0 (released 2020-03-20)
---------------------------

- [designspaceLib] Added ``add{Axis,Source,Instance,Rule}Descriptor`` methods to the ``DesignSpaceDocument`` class, to initialize new descriptor objects using keyword arguments and, at the same time, append them to the current document (#1860).
- [unicodedata] Update to Unicode 13.0 (#1859).

4.4.3 (released 2020-03-13)
---------------------------

- [varLib] Always build the ``gvar`` table for TrueType-flavored Variable Fonts, even if it contains no variation data. The table is required according to the OpenType spec (#1855, #1857).

4.4.2 (released 2020-03-12)
---------------------------

- [ttx] Annotate ``LookupFlag`` in the XML dump with a comment explaining what bits are set and what they mean (#1850).
- [feaLib] Added a more descriptive message to the ``IncludedFeaNotFound`` error (#1842).

4.4.1 (released 2020-02-26)
---------------------------

- [woff2] Skip normalizing ``glyf`` and ``loca`` tables if these are missing from a font (e.g. in NotoColorEmoji using ``CBDT/CBLC`` tables).
- [timeTools] Use non-localized date parsing in ``timestampFromString``, to fix an error when a non-English ``LC_TIME`` locale is set (#1838, #1839).
- [fontBuilder] Make sure the CFF table generated by fontBuilder can be used by varLib without having to compile and decompile the table first. This was breaking when converting the CFF table to CFF2 due to some unset attributes (#1836).

4.4.0 (released 2020-02-18)
---------------------------

- [colorLib] Added ``fontTools.colorLib.builder`` module, initially with ``buildCOLR`` and ``buildCPAL`` public functions. More color font formats will follow (#1827).
- [fontBuilder] Added ``setupCOLR`` and ``setupCPAL`` methods (#1826).
- [ttGlyphPen] Quantize ``GlyphComponent.transform`` floats to ``F2Dot14`` to fix a round-trip issue when computing bounding boxes of transformed components (#1830).
- [glyf] If a component uses reference points (``firstPt`` and ``secondPt``) for alignment (instead of X and Y offsets), compute the effective translation offset *after* having applied any transform (#1831).
- [glyf] When all glyphs have zero contours, compile ``glyf`` table data as a single null byte in order to pass validation by OTS and Windows (#1829).
- [feaLib] Parsing feature code now ensures that referenced glyph names are part of the known glyph set, unless no glyph set was provided.
- [varLib] When filling in the default axis value for a missing location of a source or instance, correctly map the value forward.
- [varLib] The avar table can now contain mapping output values that are greater than *or equal to* the preceding value, as the avar specification allows this.
- [varLib] The errors of the module are now ordered hierarchically below VarLibError. See #1821.

4.3.0 (released 2020-02-03)
---------------------------

- [EBLC/CBLC] Fixed incorrect padding length calculation for Format 3 IndexSubTable (#1817, #1818).
- [varLib] Fixed error when merging OTL tables and TTFonts were loaded as ``lazy=True`` (#1808, #1809).
- [varLib] Allow using master fonts containing a ``CFF2`` table when building a VF (#1816).
- [ttLib] Make the ``recalcBBoxes`` option work also with the ``CFF2`` table (#1816).
- [feaLib] Don't reset ``lookupflag`` in lookups defined inside feature blocks. They will now inherit the current ``lookupflag`` of the feature.
  This is what Adobe ``makeotf`` also does in this case (#1815).
- [feaLib] Fixed bug with mixed single/multiple substitutions. If a single substitution involved a glyph class, we were incorrectly using only the first glyph in the class (#1814).

4.2.5 (released 2020-01-29)
---------------------------

- [feaLib] Do not fail on duplicate multiple substitutions, only warn (#1811).
- [subset] Optimize SinglePos subtables to Format 1 if all ValueRecords are the same (#1802).

4.2.4 (released 2020-01-09)
---------------------------

- [unicodedata] Update RTL_SCRIPTS for Unicode 11 and 12.

4.2.3 (released 2020-01-07)
---------------------------

- [otTables] Fixed bug when splitting `MarkBasePos` subtables as offsets overflow. The mark class values in the split subtable were not being updated, leading to invalid mark-base attachments (#1797, googlefonts/noto-source#145).
- [feaLib] Only log a warning instead of an error when features contain duplicate substitutions (#1767).
- [glifLib] Strip XML comments when parsing with lxml (#1784, #1785).

4.2.2 (released 2019-12-12)
---------------------------

- [subset] Fixed issue with subsetting the FeatureVariations table when the index of features changes as features get dropped. The feature index needs to be remapped to point to the index of the remaining features (#1777, #1782).
- [fontBuilder] Added `addFeatureVariations` method to the `FontBuilder` class. This is a shorthand for calling `featureVars.addFeatureVariations` on the builder's TTFont object (#1781).
- [glyf] Fixed the flags bug in glyph.drawPoints() like we did for glyph.draw() (#1771, #1774).

4.2.1 (released 2019-12-06)
---------------------------

- [glyf] Use the ``flagOnCurve`` bit mask in ``glyph.draw()``, so that we ignore the ``overlap`` flag that may be set when instantiating variable fonts (#1771).

4.2.0 (released 2019-11-28)
---------------------------

- [pens] Added the following pens:

  * ``roundingPen.RoundingPen``: filter pen that rounds coordinates and components' offsets to integers;
  * ``roundingPen.RoundingPointPen``: like the above, but using the PointPen protocol;
  * ``filterPen.FilterPointPen``: base class for filter point pens;
  * ``transformPen.TransformPointPen``: filter point pen to apply an affine transform;
  * ``recordingPen.RecordingPointPen``: records and replays point-pen commands.

- [ttGlyphPen] Always round float coordinates and component offsets to integers (#1763).
- [ufoLib] When converting kerning groups from UFO2 to UFO3, avoid confusing groups with the same name as one of the glyphs (#1761, #1762, unified-font-object/ufo-spec#98).

4.1.0 (released 2019-11-18)
---------------------------

- [instancer] Implemented restricting axis ranges (level 3 partial instancing); see the sketch below. You can now pass ``{axis_tag: (min, max)}`` tuples as input to the ``instantiateVariableFont`` function. Note that changing the default axis position is not supported yet. The command-line script also accepts axis ranges in the form of colon-separated float values, e.g. ``wght=400:700`` (#1753, #1537).
- [instancer] Never drop STAT ``DesignAxis`` records, but only prune out-of-range ``AxisValue`` records.
- [otBase/otTables] Enforce that VarStore.RegionAxisCount == fvar.axisCount, even when the regions list is empty, to appease OTS < v8.0 (#1752).
- [designspaceLib] Defined new ``processing`` attribute for the ``<rules>`` element, with values "first" or "last", plus other editorial changes to the DesignSpace specification. Bumped format version to 4.1 (#1750).
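A sketch of level-3 partial instancing from the 4.1.0 entry above; the axis tags and paths are placeholders:

.. code:: python

    from fontTools.ttLib import TTFont
    from fontTools.varLib import instancer

    varfont = TTFont("MyVF.ttf")  # placeholder variable font
    # Pin wdth at 100, restrict wght to the 400-700 range:
    partial = instancer.instantiateVariableFont(
        varfont, {"wdth": 100, "wght": (400, 700)}
    )
    partial.save("MyVF-wght400-700.ttf")  # placeholder output path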
- [varLib] Improved the error message when masters' glyph orders do not match (#1758, #1759).
- [featureVars] Allow specifying a custom feature tag in ``addFeatureVariations``; allow said feature to already exist, in which case we append new lookup indices to existing features. Implemented the ``<rules>`` attribute ``processing`` according to the DesignSpace specification update in #1750. Depending on this flag, we generate either an 'rvrn' (always processed first) or an 'rclt' feature (follows lookup order, therefore last) (#1747, #1625, #1371).
- [ttCollection] Added support for context-manager auto-closing via the ``with`` statement, like with ``TTFont`` (#1751).
- [unicodedata] Require unicodedata2 >= 12.1.0.
- [py2.py3] Removed yet more PY2 vestiges (#1743).
- [_n_a_m_e] Fixed issue when comparing NameRecords with different string types (#1742).
- [fixedTools] Changed ``fixedToFloat`` to not do any rounding but simply return ``value / (1 << precisionBits)``. Added ``floatToFixedToStr`` and ``strToFixedToFloat`` functions to be used when loading from or dumping to XML. Fixed values (e.g. fvar axes and instance coordinates, avar mappings, etc.) are now stored as un-rounded decimal floats upon decompiling (#1740, #737).
- [feaLib] Fixed handling of multiple ``LigatureCaret`` statements for the same glyph. Only the first rule per glyph is used, additional ones are ignored (#1733).

4.0.2 (released 2019-09-26)
---------------------------

- [voltLib] Added support for ``ALL`` and ``NONE`` in ``PROCESS_MARKS`` (#1732).
- [Silf] Fixed issue in ``Silf`` table compilation and decompilation regarding str vs bytes in python3 (#1728).
- [merge] Handle duplicate glyph names better: instead of appending the font index to all glyph names, use code similar to what we use in the ``post`` and ``CFF`` tables (#1729).

4.0.1 (released 2019-09-11)
---------------------------

- [otTables] Support fixing offset overflows in ``MultipleSubst`` lookup subtables (#1706).
- [subset] Prune empty strikes in ``EBDT`` and ``CBDT`` table data (#1698, #1633).
- [pens] Fixed issue in ``PointToSegmentPen`` when the last point of a closed contour has the same coordinates as the starting point and was incorrectly dropped (#1720).
- [Graphite] Fixed ``Sill`` table output to pass OTS (#1705).
- [name] Added ``removeNames`` method to the ``table__n_a_m_e`` class (#1719).
- [ttLib] Added aliases for the renamed entries ``ascender`` and ``descender`` in the ``hhea`` table (#1715).

4.0.0 (released 2019-08-22)
---------------------------

- NOTE: The v4.x version series only supports Python 3.6 or greater. You can keep using fonttools 3.x if you need support for Python 2.
- [py23] Removed all the python2-only code since it is no longer reachable, thus unused; only the Python3 symbols were kept, but these are no-ops. The module is now DEPRECATED and will be removed in the future.
- [ttLib] Fixed UnboundLocalError for empty loca/glyph tables (#1680). Also, allow the glyf table to be incomplete when dumping to XML (#1681).
- [varLib.models] Fixed KeyError when sorting masters and there are no on-axis masters for a given axis (38a8eb0e).
- [cffLib] Make sure glyph names are unique (#1699).
- [feaLib] Fix the feature parser to correctly handle octal numbers (#1700).

3.44.0 (released 2019-08-02)
----------------------------

- NOTE: This is the last scheduled release to support Python 2.7. The upcoming fonttools v4.x series is going to require Python 3.6 or greater.
- [varLib] Added new ``varLib.instancer`` module for partially instantiating variable fonts.
  This extends (and will eventually replace) the ``varLib.mutator`` module, as it allows creating not just full static instances from a variable font, but also "partial" or "less variable" fonts where some of the axes are dropped or instantiated at a particular value. Also available from the command line as `fonttools varLib.instancer --help` (#1537, #1628).
- [cffLib] Added support for ``FDSelect`` format 4 (#1677).
- [subset] Added support for subsetting the ``sbix`` (Apple bitmap color font) table.
- [t1Lib] Fixed issue parsing the ``eexec`` section in Type1 fonts when whitespace characters are interspersed among the trailing zeros (#1676).
- [cffLib.specializer] Fixed bug in ``programToCommands`` with CFF2 charstrings (#1669).

3.43.2 (released 2019-07-10)
----------------------------

- [featureVars] Fixed region-merging code on python3 (#1659).
- [varLib.cff] Fixed merging of sparse PrivateDict items (#1653).

3.43.1 (released 2019-06-19)
----------------------------

- [subset] Fixed regression when passing the ``--flavor=woff2`` option with an input font that was already compressed as WOFF 1.0 (#1650).

3.43.0 (released 2019-06-18)
----------------------------

- [woff2] Added support for compressing/decompressing WOFF2 fonts with non-transformed ``glyf`` and ``loca`` tables, as well as with a transformed ``hmtx`` table. Removed the ``Snippets/woff2_compress.py`` and ``Snippets/woff2_decompress.py`` scripts, and replaced them with a new console entry point ``fonttools ttLib.woff2`` that provides two sub-commands, ``compress`` and ``decompress``.
- [varLib.cff] Fixed bug when merging CFF2 ``PrivateDicts``. The ``PrivateDict`` data from the first region font was incorrectly used for all subsequent fonts. The bug would only affect variable CFF2 fonts with hinting (#1643, #1644). Also, fixed a merging bug when VF masters have no blends or marking glyphs (#1632, #1642).
- [loggingTools] Removed unused backport of the ``LastResortLogger`` class.
- [subset] Gracefully handle a partial MATH table (#1635).
- [featureVars] Avoid duplicate references to the ``rvrn`` feature record in ``DefaultLangSys`` tables when calling ``addFeatureVariations`` on a font that does not already have a ``GSUB`` table (aa8a5bc6).
- [varLib] Fixed merging of class-based kerning. Before, the process could introduce rogue kerning values and variations for random classes against class zero (everything not otherwise classed).
- [varLib] Fixed merging GPOS tables from master fonts with different numbers of ``SinglePos`` subtables (#1621, #1641).
- [unicodedata] Updated Blocks, Scripts and ScriptExtensions to Unicode 12.1.

3.42.0 (released 2019-05-28)
----------------------------

- [OS/2] Fixed the sign of ``fsType``: it should be ``uint16``, not ``int16`` (#1619).
- [subset] Skip out-of-range class values in mark attachment (#1478).
- [fontBuilder] Add an empty ``DSIG`` table with the ``setupDummyDSIG`` method (#1621).
- [varLib.merger] Fixed bug whereby ``GDEF.GlyphClassDef`` was being dropped when generating an instance via ``varLib.mutator`` (#1614).
- [varLib] Added command-line options ``-v`` and ``-q`` to configure logging (#1613).
- [subset] Update font extents in the head table (#1612).
- [subset] Make --retain-gids truncate empty glyphs after the last non-empty glyph (#1611).
- [requirements] Updated the ``unicodedata2`` backport for Unicode 12.0.

3.41.2 (released 2019-05-13)
----------------------------

- [cffLib] Fixed issue when importing a ``CFF2`` variable font from XML, whereby the VarStore state was not propagated to PrivateDict (#1598).
- [varLib] Don't drop ``post`` glyph names when building a CFF2 variable font (#1609).

3.41.1 (released 2019-05-13)
----------------------------

- [designspaceLib] Added ``loadSourceFonts`` method to load source fonts using a custom opener function (#1606).
- [head] Round font bounding box coordinates to integers to fix a compile error if a CFF font has float coordinates (#1604, #1605).
- [feaLib] Don't write ``None`` in ``ast.ValueRecord.asFea()`` (#1599).
- [subset] Fixed ``AssertionError`` when using the ``--desubroutinize`` option (#1590, #1594).
- [graphite] Fixed bug in the ``Silf`` table's ``decompile`` method unmasked by a previous typo fix (#1597). Decode the language code as UTF-8 in the ``Sill`` table's ``decompile`` method (#1600).

3.41.0 (released 2019-04-29)
----------------------------

- [varLib/cffLib] Added support for building a ``CFF2`` variable font from sparse masters, or masters with more than one model (multiple ``VarStore.VarData``). In ``cffLib.specializer``, added support for ``CFF2`` CharStrings with ``blend`` operators (#1547, #1591).
- [subset] Fixed subsetting ``HVAR`` and ``VVAR`` with the ``--retain-gids`` option, and when the advances mapping is null while the sidebearings mappings are non-null (#1587, #1588).
- Added ``otlLib.maxContextCalc`` module to compute the ``OS/2.usMaxContext`` value. Calculate it automatically when compiling features with feaLib. Added option ``--recalc-max-context`` to the ``subset`` module (#1582).
- [otBase/otTables] Fixed ``AttributeError`` on missing OT table fields after importing a font from TTX (#1584).
- [graphite] Fixed typo in the ``Silf`` table's ``decompile`` method (#1586).
- [otlLib] Better compress ``GPOS`` SinglePos (LookupType 1) subtables (#1539).

3.40.0 (released 2019-04-08)
----------------------------

- [subset] Fixed error while subsetting ``VVAR`` with the ``--retain-gids`` option (#1552).
- [designspaceLib] Use the up-to-date default location in the ``findDefault`` method (#1554).
- [voltLib] Allow passing a file-like object to Parser.
- [arrayTools/glyf] ``calcIntBounds`` (used to compute bounding boxes of glyf table's glyphs) now uses ``otRound`` instead of ``round3`` (#1566).
- [svgLib] Added support for converting more SVG shapes to path ``d`` strings (ellipse, line, polyline), as well as support for ``transform`` attributes. Only ``matrix`` transformations are currently supported (#1564, #1564).
- [varLib] Added support for building the ``VVAR`` table from ``vmtx`` and ``VORG`` tables (#1551).
- [fontBuilder] Enable making CFF2 fonts with ``post`` table format 2 (#1557).
- Fixed ``DeprecationWarning`` on invalid escape sequences (#1562).

3.39.0 (released 2019-03-19)
----------------------------

- [ttLib/glyf] Raise a more specific error when encountering recursive component references (#1545, #1546).
- [Doc/designspaceLib] Defined new ``public.skipExportGlyphs`` lib key (#1534, unified-font-object/ufo-spec#84).
- [varLib] Use ``vmtx`` to compute vertical phantom points; or ``hhea.ascent`` and ``head.unitsPerEM`` if ``vmtx`` is missing (#1528).
- [gvar/cvar] Sort the XML element's min/value/max attributes in TupleVariation toXML to improve readability of the TTX dump (#1527).
- [varLib.plot] Added support for 2D plots with only one variation axis (#1522).
- [designspaceLib] Use axes maps when normalizing locations in DesignSpaceDocument (#1226, #1521), and when finding the default source (#1535).
- [mutator] Set ``OVERLAP_SIMPLE`` and ``OVERLAP_COMPOUND`` glyf flags by default in ``instantiateVariableFont``. Added ``--no-overlap`` cli option to disable this (#1518).
- [subset] Fixed subsetting the ``VVAR`` table (#1516, #1517). Fixed subsetting an ``HVAR`` table that has an ``AdvanceWidthMap`` when the option ``--retain-gids`` is used.
- [feaLib] Added ``forceChained`` in MultipleSubstStatement (#1511). Fixed double indentation of the ``subtable`` statement (#1512). Added support for the ``subtable`` statement in more places than just PairPos lookups (#1520). Handle lookupflag 0 and lookupflag without a value (#1540).
- [varLib] In ``load_designspace``, provide a default English name for the ``ital`` axis tag.
- Removed pyftinspect because it is unmaintained and bitrotted.

3.38.0 (released 2019-02-18)
----------------------------

- [cffLib] Fixed RecursionError when unpickling or deepcopying a TTFont with a CFF table (#1488, 649dc49).
- [subset] Fixed AttributeError when using the --desubroutinize option (#1490). Also, fixed a desubroutinizing bug when subrs contain hints (#1499).
- [CPAL] Make Color a subclass of namedtuple (173a0f5).
- [feaLib] Allow hyphens in glyph class names.
- [feaLib] Added 'tables' option to __main__.py (#1497).
- [feaLib] Add support for special-case contextual positioning formatting (#1501).
- [svgLib] Support converting SVG basic shapes (rect, circle, etc.) into equivalent SVG paths (#1500, #1508).
- [Snippets] Added name-viewer.ipynb Jupyter notebook.

3.37.3 (released 2019-02-05)
----------------------------

- The previous release accidentally changed several files from Unix to DOS line-endings. Fix that.

3.37.2 (released 2019-02-05)
----------------------------

- [varLib] Temporarily revert the fix to ``load_masters()``, which caused a crash in ``interpolate_layout()`` when ``deepcopy``-ing OTFs.

3.37.1 (released 2019-02-05)
----------------------------

- [varLib] ``load_masters()`` now actually assigns the fonts it loads to the source.font attributes.
- [varLib] Fixed an MVAR table generation crash when sparse masters were involved.
- [voltLib] ``parse_coverage_()`` returns a tuple instead of an ast.Enum.
- [feaLib] A MarkClassDefinition inside a block is no longer doubly indented compared to the rest of the block.

3.37.0 (released 2019-01-28)
----------------------------

- [svgLib] Added support for converting elliptical arcs to cubic bezier curves (#1464).
- [py23] Added backport for ``math.isfinite``.
- [varLib] Apply the HIDDEN flag to an fvar axis if the designspace axis has the attribute ``hidden=1``.
- Fixed "DeprecationWarning: invalid escape sequence" in Python 3.7.
- [voltLib] Fixed parsing glyph groups. Distinguish different PROCESS_MARKS. Accept COMPONENT glyph type.
- [feaLib] Distinguish a missing value and an explicit ``<NULL>`` for PairPos2 format A (#1459). Round-trip the ``useExtension`` keyword. Implemented the ``ValueRecord.asFea`` method.
- [subset] Insert empty widths into hdmx when retaining gids (#1458).

3.36.0 (released 2019-01-17)
----------------------------

- [ttx] Added ``--no-recalc-timestamp`` option to keep the original font's ``head.modified`` timestamp (#1455, #46).
- [ttx/psCharStrings] Fixed issues while dumping and round-tripping the CFF2 table with ttx (#1451, #1452, #1456).
- [voltLib] Fixed check for duplicate anchors (#1450). Don't try to read past the ``END`` operator in a .vtp file (#1453).
- [varLib] Use sentinel value -0x8000 (-32768) to ignore post.underlineThickness and post.underlinePosition when generating MVAR deltas (#1449, googlei18n/ufo2ft#308).
- [subset] Added ``--retain-gids`` option to subset a font without modifying the current glyph indices (#1443, #1447).
- [ufoLib] Replace deprecated calls to ``getbytes`` and ``setbytes`` with the new equivalent ``readbytes`` and ``writebytes`` calls. ``fs`` >= 2.2 is now required.
- [varLib] Allow loading masters from TTX files as well (#1441).

3.35.2 (released 2019-01-14)
----------------------------

- [hmtx/vmtx] Allow compiling/decompiling ``hmtx`` and ``vmtx`` tables even without the corresponding (required) metrics header tables, ``hhea`` and ``vhea`` (#1439).
- [varLib] Added support for localized axes' ``labelname`` and named instances' ``stylename`` (#1438).

3.35.1 (released 2019-01-09)
----------------------------

- [_m_a_x_p] Include ``maxComponentElements`` in the ``maxp`` table's recalculation.

3.35.0 (released 2019-01-07)
----------------------------

- [psCharStrings] In the ``encodeFloat`` function, use the float's "general format" with 8 digits of precision (i.e. ``%8g``) instead of ``str()``. This works around a macOS rendering issue when real numbers in the CFF table are too long, and also makes sure that floats are encoded with the same precision in python 2.7 and 3.x (#1430, googlei18n/ufo2ft#306).
- [_n_a_m_e/fontBuilder] Make ``_n_a_m_e_table.addMultilingualName`` also add Macintosh (platformID=1) names by default. Added options to the ``FontBuilder`` ``setupNameTable`` method to optionally disable Macintosh or Windows names (#1359, #1431).
- [varLib] Make ``build`` optionally accept a ``DesignSpaceDocument`` object, instead of a designspace file path. The caller can now set the ``font`` attribute of the designspace's sources to a TTFont object, thus allowing filename manipulation to be skipped altogether (#1416, #1425).
- [sfnt] Allow SFNTReader objects to be deep-copied.
- Require typing>=3.6.4 on py27 to fix an issue with singledispatch (#1423).
- [designspaceLib/t1Lib/macRes] Fixed some cases where pathlib.Path objects were not accepted (#1421).
- [varLib] Fixed merging of multiple PairPosFormat2 subtables (#1411).
- [varLib] The default STAT table version is now set to 1.1, to improve compatibility with legacy applications (#1413).

3.34.2 (released 2018-12-17)
----------------------------

- [merge] Fixed AssertionError when none of the script tables in GPOS/GSUB have a DefaultLangSys record (#1408, 135a4a1).

3.34.1 (released 2018-12-17)
----------------------------

- [varLib] Work around a macOS rendering issue for composites without a gvar entry (#1381).

3.34.0 (released 2018-12-14)
----------------------------

- [varLib] Support generation of CFF2 variable fonts. ``model.reorderMasters()`` now supports arbitrary mapping. Fix handling of overlapping ranges for feature variations (#1400).
- [cffLib, subset] Code clean-up and fixes related to CFF2 support.
- [ttLib.tables.ttProgram] Use raw strings for regex patterns (#1389).
- [fontbuilder] Initial support for building CFF2 fonts. Set CFF's ``FontMatrix`` automatically from unitsPerEm.
- [plistLib] Accept the more general ``collections.Mapping`` instead of the specific ``dict`` class, to support custom data classes that should serialize to dictionaries.

3.33.0 (released 2018-11-30)
----------------------------

- [subset] Subsetter bug fix with variable fonts.
- [varLib.featureVar] Improve FeatureVariations generation with many rules.
- [varLib] Enable sparse masters when building variable fonts: https://github.com/fonttools/fonttools/pull/1368#issuecomment-437257368
- [varLib.mutator] Add IDEF for the GETVARIATION opcode, for handling hints in an instance.
- [ttLib] Ignore the length of kern table subtable format 0.

3.32.0 (released 2018-11-01)
----------------------------

- [ufoLib] Make ``UFOWriter`` a subclass of ``UFOReader``, and use mixins for shared methods (#1344).
- [featureVars] Fixed normalization error when a condition's minimum/maximum attributes are missing in the designspace ``<rule>`` (#1366).
- [setup.py] Added ``[plot]`` to extras, to optionally install ``matplotlib``, needed to use the ``fontTools.varLib.plot`` module.
- [varLib] Take the total bounding box into account when resolving the model (7ee81c8). If multiple axes have the same range ratio, cut across both (62003f4).
- [subset] Don't error if ``STAT`` has no ``AxisValue`` tables.
- [fontBuilder] Added a new submodule which contains a ``FontBuilder`` wrapper class around ``TTFont`` that makes it easier to create a working TTF or OTF font from scratch with code; see the sketch below. NOTE: the API is still experimental and may change in future versions.

3.31.0 (released 2018-10-21)
----------------------------

- [ufoLib] Merged the `ufoLib <https://github.com/unified-font-objects/ufoLib>`__ master branch into a new ``fontTools.ufoLib`` package (#1335, #1095). Moved the ``ufoLib.pointPen`` module to ``fontTools.pens.pointPen``. Moved the ``ufoLib.etree`` module to ``fontTools.misc.etree``. Moved the ``ufoLib.plistlib`` module to ``fontTools.misc.plistlib``. To use the new ``fontTools.ufoLib`` module you need to install fonttools with the ``[ufo]`` extra, or you can manually install the required additional dependencies (cf. README.rst).
- [morx] Support AAT action type to insert glyphs, and clean up compilation of AAT action tables (4a1871f, 2011ccf).
- [subset] The ``--no-hinting`` option on a CFF font now also drops the optional hinting keys in the Private dict: ``ForceBold``, ``LanguageGroup``, and ``ExpansionFactor`` (#1322).
- [subset] Include nameIDs referenced by the STAT table (#1327).
- [loggingTools] Added ``msg=None`` argument to ``CapturingLogHandler.assertRegex`` (0245f2c).
- [varLib.mutator] Implemented ``FeatureVariations`` instantiation (#1244).
- [g_l_y_f] Added PointPen support to ``_TTGlyph`` objects (#1334).

3.30.0 (released 2018-09-18)
----------------------------

- [feaLib] Skip building noop class PairPos subtables when Coverage is NULL (#1318).
- [ttx] Expose the previously reserved bit flag ``OVERLAP_SIMPLE`` of the glyf table's contour points in the TTX dump. This is used in some implementations to specify a non-zero fill with overlapping contours (#1316).
- [ttLib] Added support for decompiling/compiling ``TS1C`` tables containing VTT sources for the ``cvar`` variation table (#1310).
- [varLib] Use ``fontTools.designspaceLib`` to read the DesignSpaceDocument. The ``fontTools.varLib.designspace`` module is now deprecated and will be removed in future versions. The presence of an explicit ``axes`` element is now required in order to build a variable font (#1224, #1313).
- [varLib] Implemented building the GSUB FeatureVariations table from the ``rules`` element of a DesignSpace document (#1240, #713, #1314).
- [subset] Added ``--no-layout-closure`` option to not expand the subset with the glyphs produced by OpenType layout features. Instead, OpenType features will be subset to only the rules that are relevant to the otherwise-specified glyph set (#43, #1121).

3.29.1 (released 2018-09-10)
----------------------------

- [feaLib] Fixed issue whereby lookups from DFLT/dflt were not included in the DFLT/non-dflt language systems (#1307).
- [graphite] Fixed issue on big-endian architectures (e.g. ppc64) (#1311).
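A sketch of the experimental ``FontBuilder`` API from the 3.32.0 entry above, loosely following the submodule's own example; all names and metrics are placeholders:

.. code:: python

    from fontTools.fontBuilder import FontBuilder
    from fontTools.pens.ttGlyphPen import TTGlyphPen

    def box_glyph():
        # Draw a simple rectangle as a stand-in outline.
        pen = TTGlyphPen(None)
        pen.moveTo((100, 0))
        pen.lineTo((100, 700))
        pen.lineTo((500, 700))
        pen.lineTo((500, 0))
        pen.closePath()
        return pen.glyph()

    fb = FontBuilder(unitsPerEm=1000, isTTF=True)
    fb.setupGlyphOrder([".notdef", "A"])
    fb.setupCharacterMap({0x41: "A"})
    fb.setupGlyf({".notdef": box_glyph(), "A": box_glyph()})
    fb.setupHorizontalMetrics({g: (600, 100) for g in fb.font.getGlyphOrder()})
    fb.setupHorizontalHeader(ascent=800, descent=-200)
    fb.setupNameTable({"familyName": "Example", "styleName": "Regular"})
    fb.setupOS2()
    fb.setupPost()
    fb.save("Example-Regular.ttf")  # placeholder output path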
- [subset] Added ``--layout-scripts`` option to add/exclude the set of OpenType layout scripts that will be preserved. By default all scripts are retained (``'*'``) (#1303).

3.29.0 (released 2018-07-26)
----------------------------

- [feaLib] In the OTL table builder, when the ``name`` table is excluded from the list of tables to be built, skip compiling ``featureNames`` blocks, as the records referenced in the ``FeatureParams`` table don't exist (68951b7).
- [otBase] Try ``ExtensionLookup`` if other offset-overflow methods fail (05f95f0).
- [feaLib] Added support for explicit ``subtable;`` break statements in PairPos lookups; previously these were ignored (#1279, #1300, #1302).
- [cffLib.specializer] Make sure the stack depth does not exceed maxstack - 1, so that a subroutinizer can insert subroutine calls (#1301, https://github.com/googlei18n/ufo2ft/issues/266).
- [otTables] Added support for fixing offset overflow errors occurring inside ``MarkBasePos`` subtables (#1297).
- [subset] Write the default output file extension based on the ``--flavor`` option, or the value of ``TTFont.sfntVersion`` (d7ac0ad).
- [unicodedata] Updated Blocks, Scripts and ScriptExtensions for Unicode 11 (452c85e).
- [xmlWriter] Added a context manager to the XMLWriter class to auto-close the file descriptor on exit (#1290).
- [psCharStrings] Optimize the charstring's bytecode by encoding as integers all float values that have no decimal portion (8d7774a).
- [ttFont] Fixed missing import of the ``TTLibError`` exception (#1285).
- [feaLib] Allow any languages other than ``dflt`` under the ``DFLT`` script (#1278, #1292).

3.28.0 (released 2018-06-19)
----------------------------

- [featureVars] Added experimental module to build ``FeatureVariations`` tables. Still needs to be hooked up to ``varLib.build`` (#1240).
- [fixedTools] Added ``otRound`` to round floats to the nearest integer towards positive Infinity. This is now used where we deal with visual data like X/Y coordinates, advance widths/heights, variation deltas, and similar (#1274, #1248).
- [subset] Improved the GSUB closure memoization algorithm.
- [varLib.models] Fixed regression in model resolution (180124, #1269).
- [feaLib.ast] Fixed error when converting ``SubtableStatement`` to string (#1275).
- [varLib.mutator] Set ``OS/2.usWeightClass`` and ``usWidthClass``, and ``post.italicAngle``, based on the 'wght', 'wdth' and 'slnt' axis values (#1276, #1264).
- [py23/loggingTools] Don't automatically set the ``logging.lastResort`` handler on py27. Moved ``LastResortLogger`` to the ``loggingTools`` module (#1277).

3.27.1 (released 2018-06-11)
----------------------------

- [ttGlyphPen] Issue a warning and skip building non-existing components (https://github.com/googlei18n/fontmake/issues/411).
- [tests] Fixed issue running ttx_test.py from a tagged commit.

3.27.0 (released 2018-06-11)
----------------------------

- [designspaceLib] Added new ``conditionSet`` element to the ``rule`` element in the designspace document. Bumped the ``format`` attribute to ``4.0`` (previously, it was formatted as an integer). Removed the ``checkDefault`` and ``checkAxes`` methods, and any kind of guessing about the axes when the ``<axes>`` element is missing. The default master is expected at the intersection of all default values for each axis (#1254, #1255, #1267).
- [cffLib] Fixed issues when compiling CFF2 or converting from CFF when the font has an FDArray (#1211, #1271).
- [varLib] Avoid attempting to build the ``cvar`` table when the ``glyf`` table is not present, as is the case for CFF2 fonts.
- [subset] Handle None coverages in MarkGlyphSets; revert commit 02616ab that sets empty Coverage tables in MarkGlyphSets to None, to make OTS happy.
- [ttFont] Allow building glyph order from ``maxp.numGlyphs`` when ``post`` or ``cmap`` are missing.
- [ttFont] Added ``__len__`` method to ``_TTGlyphSet``.
- [glyf] Ensure ``GlyphCoordinates`` never overflow signed shorts (#1230).
- [py23] Added alias for ``itertools.izip`` shadowing the built-in ``zip``.
- [loggingTools] Memoize ``log`` property of ``LogMixin`` class (fbab12).
- [ttx] Improved test coverage (#1261).
- [Snippets] Added script to append a suffix to all family names in a font.
- [varLib.plot] Make it work with matplotlib >= 2.1 (b38e2b).

3.26.0 (released 2018-05-03)
----------------------------

- [designspace] Added a new optional ``layer`` attribute to the source element, and a corresponding ``layerName`` attribute to the ``SourceDescriptor`` object (#1253). Added ``conditionset`` element to the ``rule`` element to the spec, but not implemented in designspace reader/writer yet (#1254).
- [varLib.models] Refine modeling one last time (0ecf5c5).
- [otBase] Fixed sharing of tables referred to by different offset sizes (795f2f9).
- [subset] Don't drop a GDEF that only has VarStore (fc819d6). Set to None empty Coverage tables in MarkGlyphSets (02616ab).
- [varLib] Added ``--master-finder`` command-line option (#1249).
- [varLib.mutator] Prune fvar nameIDs from instance's name table (#1245).
- [otTables] Allow decompiling bad ClassDef tables with invalid format, with warning (#1236).
- [varLib] Make STAT v1.2 and reuse nameIDs from fvar table (#1242).
- [varLib.plot] Show master locations. Set axis limits to -1, +1.
- [subset] Handle HVAR direct mapping. Passthrough 'cvar'. Added ``--font-number`` command-line option for collections.
- [t1Lib] Allow a text encoding to be specified when parsing a Type 1 font (#1234). Added ``kind`` argument to T1Font constructor (c5c161c).
- [ttLib] Added context manager API to ``TTFont`` class, so it can be used in ``with`` statements to auto-close the file when exiting the context (#1232). See the sketch below.

3.25.0 (released 2018-04-03)
----------------------------

- [varLib] Improved support-resolution algorithm. Previously, the on-axis masters would always cut the space. They don't anymore. That's more consistent, and fixes the main issue Erik showed at TYPO Labs 2017. Any varfont built that had an unusual master configuration will change when rebuilt (42bef17, a523a697, https://github.com/googlei18n/fontmake/issues/264).
- [varLib.models] Added a ``main()`` entry point, that takes positions and prints model results.
- [varLib.plot] Added new module to plot a designspace's VariationModel. Requires ``matplotlib``.
- [varLib.mutator] Added -o option to specify output file path (2ef60fa).
- [otTables] Fixed IndexError while pruning HVAR pre-write (6b6c34a).
- [varLib.models] Convert delta array to floats if values overflow signed short integer (0055f94).

3.24.2 (released 2018-03-26)
----------------------------

- [otBase] Don't fail during ``ValueRecord`` copy if src has more items. We drop hinting in the subsetter by simply changing ValueFormat, without cleaning up the actual ValueRecords. This was causing an assertion error if a variable font was subsetted without hinting and then passed directly to the mutator for instantiation without first saving it to disk.

3.24.1 (released 2018-03-06)
----------------------------

- [varLib] Don't remap the same ``DeviceTable`` twice in VarStore optimizer (#1206).
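
The ``TTFont`` context manager added in 3.26.0 above can be used like this (a small sketch; the font path is hypothetical)::

    from fontTools.ttLib import TTFont

    # The underlying file is closed automatically when the block exits.
    with TTFont("MyFont.ttf") as font:
        print(font["name"].getDebugName(4))  # full font name
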
- [varLib] Add ``--disable-iup`` option to ``fonttools varLib`` script, and an ``optimize=True`` keyword argument to ``varLib.build`` function, to optionally disable IUP optimization while building varfonts.
- [ttCollection] Fixed issue while decompiling ttc with python3 (#1207).

3.24.0 (released 2018-03-01)
----------------------------

- [ttGlyphPen] Decompose composite glyphs if any components' transform is too large to fit a ``F2Dot14`` value, or clamp transform values that are (almost) equal to +2.0 to make them fit and avoid decomposing (#1200, #1204, #1205).
- [ttx] Added new ``-g`` option to dump glyphs from the ``glyf`` table split into individual ttx files (#153, #1035, #1132, #1202).
- Copied ``ufoLib.filenames`` module to ``fontTools.misc.filenames``, used for the ttx split-glyphs option (#1202).
- [feaLib] Added support for ``cvParameters`` blocks in Character Variant features ``cv01-cv99`` (#860, #1169).
- [Snippets] Added ``checksum.py`` script to generate/check SHA1 hash of ttx files (#1197).
- [varLib.mutator] Fixed issue while instantiating some variable fonts whereby the horizontal advance width computed from ``gvar`` phantom points could turn out to be negative (#1198).
- [varLib/subset] Fixed issue with subsetting GPOS variation data not picking up ``ValueRecord`` ``Device`` objects (54fd71f).
- [feaLib/voltLib] In all AST elements, the ``location`` is no longer a required positional argument, but an optional keyword argument (defaults to ``None``). This will make it easier to construct feature AST from code (#1201).

3.23.0 (released 2018-02-26)
----------------------------

- [designspaceLib] Added an optional ``lib`` element to the designspace as a whole, as well as to the instance elements, to store arbitrary data in a property list dictionary, similar to the UFO's ``lib``. Added an optional ``font`` attribute to the ``SourceDescriptor``, to allow operating on in-memory font objects (#1175).
- [cffLib] Fixed issue with lazy-loading of attributes when attempting to set the CFF TopDict.Encoding (#1177, #1187).
- [ttx] Fixed regression introduced in 3.22.0 that affected the split tables ``-s`` option (#1188).
- [feaLib] Added ``IncludedFeaNotFound`` custom exception subclass, raised when an included feature file cannot be found (#1186).
- [otTables] Changed ``VarIdxMap`` to use glyph names internally instead of glyph indexes. The old ttx dumps of HVAR/VVAR tables that contain indexes can still be imported (21cbab8, 38a0ffb).
- [varLib] Implemented VarStore optimizer (#1184).
- [subset] Implemented pruning of GDEF VarStore, HVAR and MVAR (#1179).
- [sfnt] Restore backward compatibility with ``numFonts`` attribute of ``SFNTReader`` object (#1181).
- [merge] Initial support for merging ``LangSysRecords`` (#1180).
- [ttCollection] Don't seek(0) when writing to possibly unseekable streams.
- [subset] Keep all ``--name-IDs`` from 0 to 6 by default (#1170, #605, #114).
- [cffLib] Added ``width`` module to calculate optimal CFF default and nominal glyph widths.
- [varLib] Don't fail if STAT is already in the master fonts (#1166).

3.22.0 (released 2018-02-04)
----------------------------

- [subset] Support subsetting ``endchar`` acting as ``seac``-like components in ``CFF`` (fixes #1162).
- [feaLib] Allow building from a pre-parsed ``ast.FeatureFile`` object. Added ``tables`` argument to only build some tables instead of all (#1159, #1163). See the sketch below.
- [textTools] Replaced ``safeEval`` with ``ast.literal_eval`` (#1139).
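
A sketch of building just some tables with feaLib, per the 3.22.0 entry above; ``addOpenTypeFeaturesFromString`` is the string-based convenience wrapper, and the font is assumed to already contain the ``f``, ``i`` and ``f_i`` glyphs::

    from fontTools.ttLib import TTFont
    from fontTools.feaLib.builder import addOpenTypeFeaturesFromString

    font = TTFont("MyFont.ttf")
    fea = """
    feature liga {
        sub f i by f_i;
    } liga;
    """
    # Compile only GSUB, leaving any other tables untouched.
    addOpenTypeFeaturesFromString(font, fea, tables=["GSUB"])
    font.save("MyFont-liga.ttf")
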
- [feaLib] Added option to the parser to not resolve ``include`` statements (#1154).
- [ttLib] Added new ``ttCollection`` module to read/write TrueType and OpenType Collections. Exports a ``TTCollection`` class with a ``fonts`` attribute containing a list of ``TTFont`` instances, the methods ``save`` and ``saveXML``, plus some list-like methods. The ``importXML`` method is not implemented yet (#17).
- [unicodedata] Added ``ot_tag_to_script`` function that converts from OpenType script tag to Unicode script code.
- Added new ``designspaceLib`` subpackage, originally from Erik Van Blokland's ``designSpaceDocument``: https://github.com/LettError/designSpaceDocument NOTE: this is not yet used internally by varLib, and the API may be subject to changes (#911, #1110, LettError/designSpaceDocument#28).
- Added new FontTools icon images (8ee7c32).
- [unicodedata] Added ``script_horizontal_direction`` function that returns either "LTR" or "RTL" given a unicode script code.
- [otConverters] Don't write descriptive name string as XML comment if the NameID value is 0 (== NULL) (#1151, #1152).
- [unicodedata] Add ``ot_tags_from_script`` function to get the list of OpenType script tags associated with unicode script code (#1150).
- [feaLib] Don't error when "enumerated" kern pairs conflict with preceding single pairs; emit warning and choose the first value (#1147, #1148).
- [loggingTools] In ``CapturingLogHandler.assertRegex`` method, match the fully formatted log message.
- [sbix] Fixed TypeError when concatenating str and bytes (#1154).
- [bezierTools] Implemented cusp support and removed ``approximate_fallback`` arg in ``calcQuadraticArcLength``. Added ``calcCubicArcLength`` (#1142).

3.21.2 (released 2018-01-08)
----------------------------

- [varLib] Fixed merging PairPos Format1/2 with missing subtables (#1125).

3.21.1 (released 2018-01-03)
----------------------------

- [feaLib] Allow mixed single/multiple substitutions (#612).
- Added missing ``*.afm`` test assets to MANIFEST.in (#1137).
- Fixed dumping ``SVG`` tables containing color palettes (#1124).

3.21.0 (released 2017-12-18)
----------------------------

- [cmap] When compiling format6 subtable, don't assume gid0 is always called '.notdef' (1e42224).
- [ot] Allow decompiling fonts with bad Coverage format number (1aafae8).
- Change FontTools license to MIT (#1127).
- [post] Prune extra names already in standard Mac set (df1e8c7).
- [subset] Delete empty SubrsIndex after subsetting (#994, #1118).
- [varLib] Don't share points in cvar by default, as it currently fails on some browsers (#1113).
- [afmLib] Make poor old afmLib work on python3.

3.20.1 (released 2017-11-22)
----------------------------

- [unicodedata] Fixed issue with ``script`` and ``script_extension`` functions returning inconsistent short vs long names. They both return the short four-letter script codes now. Added ``script_name`` and ``script_code`` functions to look up the long human-readable script name from the script code, and vice versa (#1109, #1111).

3.20.0 (released 2017-11-21)
----------------------------

- [unicodedata] Added new module ``fontTools.unicodedata`` which exports the same interface as the built-in ``unicodedata`` module, with the addition of a few functions that are missing from the latter, such as ``script``, ``script_extension`` and ``block``. Added a ``MetaTools/buildUCD.py`` script to download and parse data files from the Unicode Character Database and generate python modules containing lists of ranges and property values.
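
Taken together, the ``fontTools.unicodedata`` helpers described in the entries above can be exercised like this (the expected outputs in the comments are a sketch)::

    from fontTools import unicodedata

    print(unicodedata.script("ع"))                          # 'Arab'
    print(unicodedata.script_name("Arab"))                  # 'Arabic'
    print(unicodedata.script_code("Arabic"))                # 'Arab'
    print(unicodedata.ot_tags_from_script("Arab"))          # ['arab']
    print(unicodedata.ot_tag_to_script("arab"))             # 'Arab'
    print(unicodedata.script_horizontal_direction("Arab"))  # 'RTL'
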
- [feaLib] Added ``__str__`` method to all ``ast`` elements (delegates to the ``asFea`` method).
- [feaLib] ``Parser`` constructor now accepts a ``glyphNames`` iterable instead of ``glyphMap`` dict. The latter still works but with a pending deprecation warning (#1104).
- [bezierTools] Added arc length calculation functions originally from ``pens.perimeterPen`` module (#1101).
- [varLib] Started generating STAT table (8af4309). Right now it just reflects the axes, and even that with certain limitations:

  * AxisOrdering is set to the order axes are defined,
  * Name-table entries are not shared with fvar.

- [py23] Added backports for ``redirect_stdout`` and ``redirect_stderr`` context managers (#1097).
- [Graphite] Fixed some round-trip bugs (#1093).

3.19.0 (released 2017-11-06)
----------------------------

- [varLib] Try set of used points instead of all points when testing whether to share points between tuples (#1090).
- [CFF2] Fixed issue with reading/writing PrivateDict BlueValues to TTX file. Read the commit message 8b02b5a and issue #1030 for more details. NOTE: this change invalidates all the TTX files containing CFF2 tables that were dumped with previous versions of fonttools. CFF2 Subr items can have values on the stack after the last operator, thus a ``CFF2Subr`` class was added to accommodate this (#1091).
- [_k_e_r_n] Fixed compilation of AAT kern version=1.0 tables (#1089, #1094).
- [ttLib] Added getBestCmap() convenience method to TTFont class and cmap table class that returns a preferred Unicode cmap subtable given a list of options (#1092). See the sketch below.
- [morx] Emit more meaningful subtable flags. Implement InsertionMorphAction.

3.18.0 (released 2017-10-30)
----------------------------

- [feaLib] Fixed writing back nested glyph classes (#1086).
- [TupleVariation] Reactivated shared points logic, bugfixes (#1009).
- [AAT] Implemented ``morx`` ligature subtables (#1082).
- [reverseContourPen] Keep duplicate lineTo following a moveTo (#1080, https://github.com/googlei18n/cu2qu/issues/51).
- [varLib.mutator] Support instantiation of GPOS, GDEF and MVAR (#1079).
- [sstruct] Fixed issue with ``unicode_literals`` and ``struct`` module in old versions of python 2.7 (#993).

3.17.0 (released 2017-10-16)
----------------------------

- [svgPathPen] Added an ``SVGPathPen`` that translates segment pen commands into SVG path descriptions. Copied from Tal Leming's ``ufo2svg.svgPathPen`` https://github.com/typesupply/ufo2svg/blob/d69f992/Lib/ufo2svg/svgPathPen.py
- [reverseContourPen] Added ``ReverseContourPen``, a filter pen that draws contours with the winding direction reversed, while keeping the starting point (#1071).
- [filterPen] Added ``ContourFilterPen`` to manipulate contours as a whole rather than segment by segment.
- [arrayTools] Added ``Vector`` class to apply math operations on an array of numbers, and ``pairwise`` function to loop over pairs of items in an iterable.
- [varLib] Added support for building and interpolation of ``cvar`` table (f874cf6, a25a401).

3.16.0 (released 2017-10-03)
----------------------------

- [head] Try using ``SOURCE_DATE_EPOCH`` environment variable when setting the ``head`` modified timestamp to ensure reproducible builds (#1063). See https://reproducible-builds.org/specs/source-date-epoch/
- [VTT] Decode VTT's ``TSI*`` tables text as UTF-8 (#1060).
- Added support for Graphite font tables: Feat, Glat, Gloc, Silf and Sill. Thanks @mhosken! (#1054).
- [varLib] Default to using axis "name" attribute if "labelname" element is missing (588f524).
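
The ``getBestCmap()`` convenience method from 3.19.0 above, in use (a sketch; the font path is hypothetical)::

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")
    cmap = font.getBestCmap()  # {codepoint: glyph name} from a preferred Unicode subtable
    print(cmap.get(0x41))      # e.g. 'A'
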
- [merge] Added support for merging Script records. Remove unused features and lookups after merge (d802580, 556508b).
- Added ``fontTools.svgLib`` package. Includes a parser for SVG Paths that supports the Pen protocol (#1051). Also, added a snippet to convert SVG outlines to UFO GLIF (#1053).
- [AAT] Added support for ``ankr``, ``bsln``, ``mort``, ``morx``, ``gcid``, and ``cidg``.
- [subset] Implemented subsetting of ``prop``, ``opbd``, ``bsln``, ``lcar``.

3.15.1 (released 2017-08-18)
----------------------------

- [otConverters] Implemented ``__add__`` and ``__radd__`` methods on ``otConverters._LazyList`` that decompile a lazy list before adding it to another list or ``_LazyList`` instance. Fixes an ``AttributeError`` in the ``subset`` module when attempting to sum ``_LazyList`` objects (6ef48bd2, 1aef1683).
- [AAT] Support the `opbd` table with optical bounds (a47f6588).
- [AAT] Support `prop` table with glyph properties (d05617b4).

3.15.0 (released 2017-08-17)
----------------------------

- [AAT] Added support for AAT lookups. The ``lcar`` table can be decompiled and recompiled; further work needed to handle ``morx`` table (#1025).
- [subset] Keep (empty) DefaultLangSys for Script 'DFLT' (6eb807b5).
- [subset] Support GSUB/GPOS.FeatureVariations (fe01d87b).
- [varLib] In ``models.supportScalars``, ignore an axis when its peak value is 0 (fixes #1020). See the sketch below.
- [varLib] Add default mappings to all axes in avar to fix rendering issue in some rasterizers (19c4b377, 04eacf13).
- [varLib] Flatten multiple tail PairPosFormat2 subtables before merging (c55ef525).
- [ttLib] Added support for recalculating font bounding box in ``CFF`` and ``head`` tables, and min/max values in ``hhea`` and ``vhea`` tables (#970).

3.14.0 (released 2017-07-31)
----------------------------

- [varLib.merger] Remove Extensions subtables before merging (f7c20cf8).
- [varLib] Initialize the avar segment map with required default entries (#1014).
- [varLib] Implemented optimal IUP optimization (#1019).
- [otData] Add ``AxisValueFormat4`` for STAT table v1.2 from OT v1.8.2 (#1015).
- [name] Fixed BCP47 language tag for Mac langID=9: 'si' -> 'sl'.
- [subset] Return value from ``_DehintingT2Decompiler.op_hintmask`` (c0d672ba).
- [cffLib] Allow getting TopDict by index as well as by name (dca96c9c).
- [cffLib] Removed global ``isCFF2`` state; use one set of classes for both CFF and CFF2, maintaining backward compatibility with existing code (#1007).
- [cffLib] Deprecated maxstack operator, per OpenType spec update 1.8.1.
- [cffLib] Added missing default (-100) for UnderlinePosition (#983).
- [feaLib] Enable setting nameIDs greater than 255 (#1003).
- [varLib] Recalculate ValueFormat when merging SinglePos (#996).
- [varLib] Do not emit MVAR if there are no entries in the variation store (#987).
- [ttx] For ``-x`` option, pad with space if table tag length is < 4.

3.13.1 (released 2017-05-30)
----------------------------

- [feaLib.builder] Removed duplicate lookups optimization. The original lookup order and semantics of the feature file are preserved (#976).

3.13.0 (released 2017-05-24)
----------------------------

- [varLib.mutator] Implement IUP optimization (#969).
- [_g_l_y_f.GlyphCoordinates] Changed ``__bool__()`` semantics to match those of other iterables (e46f949). Removed ``__abs__()`` (3db5be2).
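
For the ``models.supportScalars`` entry in 3.15.0 above, the relevant helper in current sources is ``supportScalar`` in ``fontTools.varLib.models``; a sketch of what it computes::

    from fontTools.varLib.models import supportScalar

    # A master whose support on 'wght' is (lower=0.0, peak=1.0, upper=1.0),
    # evaluated at the normalized location wght=0.5, scores 0.5:
    print(supportScalar({"wght": 0.5}, {"wght": (0.0, 1.0, 1.0)}))  # 0.5
    # An axis absent from the support does not constrain the location:
    print(supportScalar({"wght": 0.5}, {}))  # 1.0
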
- [varLib.interpolate_layout] Added ``mapped`` keyword argument to ``interpolate_layout`` to allow disabling avar mapping: if False (default), the location is mapped using the map element of the axes in designspace file; if True, it is assumed that location is in designspace's internal space and no mapping is performed (#950, #975).
- [varLib.interpolate_layout] Import designspace-loading logic from varLib.
- [varLib] Fixed bug with recombining PairPosClass2 subtables (81498e5, #914).
- [cffLib.specializer] When copying iterables, cast to list (462b7f86).

3.12.1 (released 2017-05-18)
----------------------------

- [pens.t2CharStringPen] Fixed AttributeError when calling addComponent in T2CharStringPen (#965).

3.12.0 (released 2017-05-17)
----------------------------

- [cffLib.specializer] Added new ``specializer`` module to optimize CFF charstrings, used by the T2CharStringPen (#948).
- [varLib.mutator] Sort glyphs by component depth before calculating composite glyphs' bounding boxes to ensure deltas are correctly calculated (#945).
- [_g_l_y_f] Fixed loss of precision in GlyphCoordinates by using 'd' (double) instead of 'f' (float) as ``array.array`` typecode (#963, #964).

3.11.0 (released 2017-05-03)
----------------------------

- [t2CharStringPen] Initial support for specialized Type2 path operators: vmoveto, hmoveto, vlineto, hlineto, vvcurveto, hhcurveto, vhcurveto and hvcurveto. This should produce more compact charstrings (#940, #403).
- [Doc] Added Sphinx sources for the documentation. Thanks @gferreira (#935).
- [fvar] Expose flags in XML (#932).
- [name] Add helper function for building multi-lingual names (#921).
- [varLib] Fixed kern merging when a PairPosFormat2 has ClassDef1 with glyphs that are NOT present in the Coverage (1b5e1c4, #939).
- [varLib] Fixed non-deterministic ClassDef order with PY3 (f056c12, #927).
- [feaLib] Throw an error when the same glyph is defined in multiple mark classes within the same lookup (3e3ff00, #453).

3.10.0 (released 2017-04-14)
----------------------------

- [varLib] Added support for building ``avar`` table, using the designspace ``<map>`` elements.
- [varLib] Removed unused ``build(..., axisMap)`` argument. Axis map should be specified in designspace file now. We do not accept nonstandard axes if ``<axes>`` element is not present.
- [varLib] Removed "custom" axis from the ``standard_axis_map``. This was added before when glyphsLib was always exporting the (unused) custom axis.
- [varLib] Added partial support for building ``MVAR`` table; does not implement ``gasp`` table variations yet.
- [pens] Added FilterPen base class, for pens that control another pen; factored out ``addComponent`` method from BasePen into a separate abstract DecomposingPen class; added DecomposingRecordingPen, which records components decomposed as regular contours. See the sketch below.
- [TSI1] Fixed computation of the textLength of VTT private tables (#913).
- [loggingTools] Added ``LogMixin`` class providing a ``log`` property to subclasses, which returns a ``logging.Logger`` named after the latter.
- [loggingTools] Added ``assertRegex`` method to ``CapturingLogHandler``.
- [py23] Added backport for python 3's ``types.SimpleNamespace`` class.
- [EBLC] Fixed issue with python 3 ``zip`` iterator.
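
A sketch of the ``DecomposingRecordingPen`` mentioned in the 3.10.0 pens entry above; the font path and glyph name are hypothetical::

    from fontTools.ttLib import TTFont
    from fontTools.pens.recordingPen import DecomposingRecordingPen

    font = TTFont("MyFont.ttf")
    glyphSet = font.getGlyphSet()
    pen = DecomposingRecordingPen(glyphSet)
    glyphSet["Aacute"].draw(pen)  # components are flattened into contours
    print(pen.value)              # list of (operator, operands) tuples
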

3.9.2 (released 2017-04-08)
---------------------------

- [pens] Added pen to draw glyphs using WxPython ``GraphicsPath`` class: https://wxpython.org/docs/api/wx.GraphicsPath-class.html
- [varLib.merger] Fixed issue with recombining multiple PairPosFormat2 subtables (#888).
- [varLib] Do not encode gvar deltas that are all zeroes, or if all values are smaller than tolerance.
- [ttLib] _TTGlyphSet glyphs now also have ``height`` and ``tsb`` (top side bearing) attributes from the ``vmtx`` table, if present.
- [glyf] In ``GlyphCoordinates`` class, added ``__bool__`` / ``__nonzero__`` methods, and ``array`` property to get raw array.
- [ttx] Support reading TTX files with BOM (#896).
- [CFF2] Fixed the reporting of the number of regions in the font.

3.9.1 (released 2017-03-20)
---------------------------

- [varLib.merger] Fixed issue while recombining multiple PairPosFormat2 subtables if they were split because of offset overflows (9798c30).
- [varLib.merger] Only merge multiple PairPosFormat1 subtables if there is at least one of the fonts with a non-empty Format1 subtable (0f5a46b).
- [varLib.merger] Fixed IndexError with empty ClassDef1 in PairPosFormat2 (aad0d46).
- [varLib.merger] Avoid reusing Class2Record (mutable) objects (e6125b3).
- [varLib.merger] Calculate ClassDef1 and ClassDef2's Format when merging PairPosFormat2 (23511fd).
- [macUtils] Added missing ttLib import (b05f203).

3.9.0 (released 2017-03-13)
---------------------------

- [feaLib] Added (partial) support for parsing feature file comments ``# ...`` appearing in between statements (#879).
- [feaLib] Cleaned up syntax tree for FeatureNames.
- [ttLib] Added support for reading/writing ``CFF2`` table (thanks to @readroberts at Adobe), and ``TTFA`` (ttfautohint) table.
- [varLib] Fixed regression introduced with 3.8.0 in the calculation of ``NumShorts``, i.e. the number of deltas in ItemVariationData's delta sets that use a 16-bit representation (b2825ff).

3.8.0 (released 2017-03-05)
---------------------------

- New pens: MomentsPen, StatisticsPen, RecordingPen, and TeePen. See the sketch below.
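
A sketch of one of these pens in action (StatisticsPen); the font path, glyph name and the particular attributes printed are assumptions::

    from fontTools.ttLib import TTFont
    from fontTools.pens.statisticsPen import StatisticsPen

    font = TTFont("MyFont.ttf")
    glyphSet = font.getGlyphSet()
    pen = StatisticsPen(glyphSet)
    glyphSet["o"].draw(pen)
    print(pen.area, pen.meanX, pen.meanY, pen.slant)
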
- [misc] Added new ``fontTools.misc.symfont`` module, for symbolic font statistical analysis; requires ``sympy`` (http://www.sympy.org/en/index.html).
- [varLib] Added experimental ``fontTools.varLib.interpolatable`` module for finding wrong contour order between different masters.
- [varLib] designspace.load() now returns a dictionary, instead of a tuple, and supports <axes> element (#864); the 'masters' item was renamed 'sources', like the <sources> element in the designspace document.
- [ttLib] Fixed issue with recalculating ``head`` modified timestamp when saving CFF fonts.
- [ttLib] In TupleVariation, round deltas before compiling (#861, fixed #592).
- [feaLib] Ignore duplicate glyphs in classes used as MarkFilteringSet and MarkAttachmentType (#863).
- [merge] Changed the ``gasp`` table merge logic so that only the one from the first font is retained, similar to other hinting tables (#862).
- [Tests] Added tests for the ``varLib`` package, as well as test fonts from the "Annotated OpenType Specification" (AOTS) to exercise ``ttLib``'s table readers/writers (<https://github.com/adobe-type-tools/aots>).

3.7.2 (released 2017-02-17)
---------------------------

- [subset] Keep advance widths when stripping ".notdef" glyph outline in CID-keyed CFF fonts (#845).
- [feaLib] Zero values now produce the same results as makeotf (#633, #848).
- [feaLib] More compact encoding for “Contextual positioning with in-line single positioning rules” (#514).

3.7.1 (released 2017-02-15)
---------------------------

- [subset] Fixed issue with ``--no-hinting`` option whereby advance widths in Type 2 charstrings were also being stripped (#709, #343).
- [feaLib] include statements now resolve relative paths like makeotf (#838).
- [feaLib] table ``name`` now handles Unicode codepoints beyond the Basic Multilingual Plane, also supports old-style MacOS platform encodings (#842).
- [feaLib] correctly escape string literals when emitting feature syntax (#780).

3.7.0 (released 2017-02-11)
---------------------------

- [ttx, mtiLib] Preserve ordering of glyph alternates in GSUB type 3 (#833).
- [feaLib] Glyph names can have dashes, as per new AFDKO syntax v1.20 (#559).
- [feaLib] feaLib.Parser now needs the font's glyph map for parsing.
- [varLib] Fix regression where GPOS values were stored as 0.
- [varLib] Allow merging of class-based kerning when ClassDefs are different.

3.6.3 (released 2017-02-06)
---------------------------

- [varLib] Fix building variation of PairPosFormat2 (b5c34ce).
- Populate defaults even for otTables that have postRead (e45297b).
- Fix compiling of MultipleSubstFormat1 with zero 'out' glyphs (b887860).

3.6.2 (released 2017-01-30)
---------------------------

- [varLib.merger] Fixed "TypeError: reduce() of empty sequence with no initial value" (3717dc6).

3.6.1 (released 2017-01-28)
---------------------------

- [py23] Fixed unhandled exception occurring at interpreter shutdown in the "last resort" logging handler (972b3e6).
- [agl] Ensure all glyph names are of native 'str' type; avoid mixing 'str' and 'unicode' in TTFont.glyphOrder (d8c4058).
- Fixed inconsistent title levels in README.rst that caused PyPI to incorrectly render the reStructuredText page.

3.6.0 (released 2017-01-26)
---------------------------

- [varLib] Refactored and improved the variation-font-building process.
- Assembly code in the fpgm, prep, and glyf tables is now indented in XML output for improved readability. The ``instruction`` element is written as a simple tag if empty (#819).
- [ttx] Fixed 'I/O operation on closed file' error when dumping multiple TTXs to standard output with the '-o -' option.
- The unit test modules (``*_test.py``) have been moved outside of the fontTools package to the Tests folder, thus they are no longer installed (#811).

3.5.0 (released 2017-01-14)
---------------------------

- Font tables read from XML can now be written back to XML with no loss.
- GSUB/GPOS LookupType is written out in XML as an element, not a comment (#792).
- When parsing cmap table, do not store items mapped to glyph id 0 (#790).
- [otlLib] Make ClassDef sorting deterministic. Fixes #766 (7d1ddb2).
- [mtiLib] Added unit tests (#787).
- [cvar] Implemented cvar table.
- [gvar] Renamed GlyphVariation to TupleVariation to match OpenType terminology.
- [otTables] Handle gracefully empty VarData.Item array when compiling XML (#797).
- [varLib] Re-enabled generation of ``HVAR`` table for fonts with TrueType outlines; removed ``--build-HVAR`` command-line option.
- [feaLib] The parser can now be extended to support non-standard statements in FEA code by using a customized Abstract Syntax Tree. See, for example, ``feaLib.builder_test.test_extensions`` and baseClass.feax (#794, fixes #773).
- [feaLib] Added ``feaLib`` command to the 'fonttools' command-line tool; applies a feature file to a font. ``fonttools feaLib -h`` for help.
- [pens] The ``T2CharStringPen`` now takes an optional ``roundTolerance`` argument to control the rounding of coordinates (#804, fixes #769).
- [ci] Measure test coverage on all supported python versions and OSes, combine coverage data and upload to https://codecov.io/gh/fonttools/fonttools (#786).
- [ci] Configured Travis and Appveyor for running tests on Python 3.6 (#785, 55c03bc).
- The manual pages installation directory can be customized through ``FONTTOOLS_MANPATH`` environment variable (#799, fixes #84).
- [Snippets] Added otf2ttf.py, for converting fonts from CFF to TrueType using the googlei18n/cu2qu module (#802).

3.4.0 (released 2016-12-21)
---------------------------

- [feaLib] Added support for generating FEA text from abstract syntax tree (AST) objects (#776). Thanks @mhosken
- Added ``agl.toUnicode`` function to convert AGL-compliant glyph names to Unicode strings (#774).
- Implemented MVAR table (b4d5381).

3.3.1 (released 2016-12-15)
---------------------------

- [setup] We no longer use versioneer.py to compute fonttools version from git metadata, as this has caused issues for some users (#767). Now we bump the version strings manually with a custom ``release`` command of setup.py script.

3.3.0 (released 2016-12-06)
---------------------------

- [ttLib] Implemented STAT table from OpenType 1.8 (#758).
- [cffLib] Fixed decompilation of CFF fonts containing non-standard key/value pairs in FontDict (issue #740; PR #744).
- [py23] minor: in ``round3`` function, allow the second argument to be ``None`` (#757).
- The standalone ``sstruct`` and ``xmlWriter`` modules, deprecated since version 3.2.0, have been removed. They can be imported from the ``fontTools.misc`` package.

3.2.3 (released 2016-12-02)
---------------------------

- [py23] optimized performance of round3 function; added backport for py35 math.isclose() (9d8dacb).
- [subset] fixed issue with 'narrow' (UCS-2) Python 2 builds and ``--text``/``--text-file`` options containing non-BMP characters (16d0e5e).
- [varLib] fixed issue when normalizing location values (8fa2ee1, #749).
- [inspect] Made it compatible with both python2 and python3 (167ee60, #748).
  Thanks @pnemade

3.2.2 (released 2016-11-24)
---------------------------

- [varLib] Do not emit null axes in fvar (1bebcec). Thanks @robmck-ms
- [varLib] Handle fonts without GPOS (7915a45).
- [merge] Ignore LangSys if None (a11bc56).
- [subset] Fix subsetting MathVariants (78d3cbe).
- [OS/2] Fix "Private Use (plane 15)" range (08a0d55). Thanks @mashabow

3.2.1 (released 2016-11-03)
---------------------------

- [OS/2] fix checking ``fsSelection`` bits matching ``head.macStyle`` bits.
- [varLib] added ``--build-HVAR`` option to generate ``HVAR`` table for fonts with TrueType outlines. For ``CFF2``, it is enabled by default.

3.2.0 (released 2016-11-02)
---------------------------

- [varLib] Improve support for OpenType 1.8 Variable Fonts:

  - Implement GDEF's VariationStore
  - Implement HVAR/VVAR tables
  - Partial support for loading MutatorMath .designspace files with varLib.designspace module
  - Add varLib.models with Variation fonts interpolation models
  - Implement GSUB/GPOS FeatureVariations
  - Initial support for interpolating and merging OpenType Layout tables (see ``varLib.interpolate_layout`` and ``varLib.merger`` modules)

- [API change] Change version to be an integer instead of a float in XML output for GSUB, GPOS, GDEF, MATH, BASE, JSTF, HVAR, VVAR, feat, hhea and vhea tables. Scripts that set the Version for those to 1.0 or other float values also need fixing. A warning is emitted when code or XML needs fixing.
- several bug fixes to the cffLib module, contributed by Adobe's @readroberts
- The XML output for CFF table now has 'major' and 'minor' elements for specifying whether it's version 1.0 or 2.0 (support for CFF2 is coming soon).
- [setup.py] remove undocumented/deprecated ``extra_path`` Distutils argument. This means that we no longer create a "FontTools" subfolder in site-packages containing the actual fontTools package, as well as the standalone xmlWriter and sstruct modules. The latter modules are also deprecated, and scheduled for removal in upcoming releases. Please change your import statements to ``from fontTools.misc import xmlWriter`` and ``from fontTools.misc import sstruct``.
- [scripts] Add a 'fonttools' command-line tool that simply runs ``fontTools.*`` sub-modules: e.g. ``fonttools ttx``, ``fonttools subset``, etc.
- [hmtx/vmtx] Read advance width/heights as unsigned short (uint16); automatically round float values to integers.
- [ttLib/xmlWriter] add 'newlinestr=None' keyword argument to ``TTFont.saveXML`` for overriding os-specific line endings (passed on to ``XMLWriter`` instances). See the sketch below.
- [versioning] Use versioneer instead of ``setuptools_scm`` to dynamically load version info from a git checkout at import time.
- [feaLib] Support backslash-prefixed glyph names.

3.1.2 (released 2016-09-27)
---------------------------

- restore Makefile as an alternative way to build/check/install
- README.md: update instructions for installing package from source, and for running test suite
- NEWS: Change log was out of sync with tagged release

3.1.1 (released 2016-09-27)
---------------------------

- Fix ``ttLibVersion`` attribute in TTX files still showing '3.0' instead of '3.1'.
- Use ``setuptools_scm`` to manage package versions.

3.1.0 (released 2016-09-26)
---------------------------

- [feaLib] New library to parse and compile Adobe FDK OpenType Feature files.
- [mtiLib] New library to parse and compile Monotype 'FontDame' OpenType Layout Tables files.
- [voltLib] New library to parse Microsoft VOLT project files.
- [otlLib] New library to work with OpenType Layout tables.
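
The ``newlinestr`` argument from the 3.2.0 ``ttLib/xmlWriter`` entry above, in use (a sketch; the font path is hypothetical)::

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")
    # Force Unix line endings in the TTX dump regardless of platform.
    font.saveXML("MyFont.ttx", newlinestr="\n")
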
- [varLib] New library to work with OpenType Font Variations.
- [pens] Add ttGlyphPen to draw to TrueType glyphs, and t2CharStringPen to draw to Type 2 Charstrings (CFF); add areaPen and perimeterPen.
- [ttLib.tables] Implement 'meta' and 'trak' tables.
- [ttx] Add --flavor option for compiling to 'woff' or 'woff2'; add ``--with-zopfli`` option to use Zopfli to compress WOFF 1.0 fonts.
- [subset] Support subsetting 'COLR'/'CPAL' and 'CBDT'/'CBLC' color fonts tables, and 'gvar' table for variation fonts.
- [Snippets] Add ``symfont.py``, for symbolic font statistics analysis; interpolatable.py, a preliminary script for detecting interpolation errors; ``{merge,dump}_woff_metadata.py``.
- [classifyTools] Helpers to classify things into classes.
- [CI] Run tests on Windows, Linux and macOS using Appveyor and Travis CI; check unit test coverage with Coverage.py/Coveralls; automatic deployment to PyPI on tags.
- [loggingTools] Use Python built-in logging module to print messages.
- [py23] Make round() behave like Python 3 built-in round(); define round2() and round3().

3.0 (released 2015-09-01)
-------------------------

- Add Snippet scripts for cmap subtable format conversion, printing GSUB/GPOS features, building a GX font from two masters
- TTX WOFF2 support and a ``-f`` option to overwrite output file(s)
- Support GX tables: ``avar``, ``gvar``, ``fvar``, ``meta``
- Support ``feat`` and gzip-compressed SVG tables
- Upgrade Mac East Asian encodings to native implementation if available
- Add Roman Croatian and Romanian encodings, codecs for mac-extended East Asian encodings
- Implement optimal GLYF glyph outline packing; disabled by default

2.5 (released 2014-09-24)
-------------------------

- Add a Qt pen
- Add VDMX table converter
- Load all OpenType sub-structures lazily
- Add support for cmap format 13.
- Add pyftmerge tool
- Update to Unicode 6.3.0d3
- Add pyftinspect tool
- Add support for Google CBLC/CBDT color bitmaps, standard EBLC/EBDT embedded bitmaps, and ``SVG`` table (thanks to Read Roberts at Adobe)
- Add support for loading, saving and ttx'ing WOFF file format
- Add support for Microsoft COLR/CPAL layered color glyphs
- Support PyPy
- Support Jython, by replacing numpy with the array/list modules and using pure-Python StringIO instead of cStringIO
- Add pyftsubset and Subsetter object, supporting CFF and TTF
- Add ttx args: -q for quiet mode, -z to choose a bitmap dump format

2.4 (released 2013-06-22)
-------------------------

- Option to write to arbitrary files
- Better dump format for DSIG
- Better detection of OTF XML
- Fix issue with Apple's kern table format
- Fix mangling of TT glyph programs
- Fix issues related to mona.ttf
- Fix Windows Installer instructions
- Fix some modern MacOS issues
- Fix minor issues and typos

2.3 (released 2009-11-08)
-------------------------

- TrueType Collection (TTC) support
- Python 2.6 support
- Update Unicode data to 5.2.0
- Couple of bug fixes

2.2 (released 2008-05-18)
-------------------------

- ClearType support
- cmap format 1 support
- PFA font support
- Switched from Numeric to numpy
- Update Unicode data to 5.1.0
- Update AGLFN data to 1.6
- Many bug fixes

2.1 (released 2008-01-28)
-------------------------

- Many years' worth of fixes and features

2.0b2 (released 2002-??-??)
---------------------------

- Be "forgiving" when interpreting the maxp table version field: interpret any value as 1.0 if it's not 0.5.
  Fixes dumping of these GPL fonts: http://www.freebsd.org/cgi/pds.cgi?ports/chinese/wangttf
- Fixed ttx -l: it turned out this part of the code didn't work with Python 2.2.1 and earlier. My bad to do most of my testing with a different version than I shipped TTX with :-(
- Fixed bug in ClassDef format 1 subtable (Andreas Seidel bumped into this one).

2.0b1 (released 2002-09-10)
---------------------------

- Fixed embarrassing bug: the master checksum in the head table is now calculated correctly even on little-endian platforms (such as Intel).
- Made the cmap format 4 compiler smarter: the binary data it creates is now more or less as compact as possible. TTX now makes more compact data than in any shipping font I've tested it with.
- Dump glyph names as a separate "GlyphOrder" pseudo table as opposed to as part of the glyf table (obviously needed for CFF-OTF's).
- Added proper support for the CFF table.
- Don't barf on empty tables (questionable, but "there are fonts out there...")
- When writing TT glyf data, align glyphs on 4-byte boundaries. This seems to be the current recommendation by MS. Also: don't barf on fonts which are already 4-byte aligned.
- Windows installer contributed by Adam Twardoch! Yay!
- Changed the command line interface again, now by creating one new tool replacing the old ones: ttx. It dumps and compiles, depending on input file types. The options have changed somewhat.

  - The -d option is back (output dir)
  - ttcompile's -i option is now called -m (as in "merge"), to avoid a clash with dump's -i.
  - The -s option ("split tables") no longer creates a directory, but instead outputs a small .ttx file containing references to the individual table files. This is not a true link, it's a simple file name, and the referenced files should be in the same directory so ttcompile can find them.
  - compile no longer accepts a directory as input argument. Instead it can parse the new "mini-ttx" format as output by "ttx -s".
  - all arguments are input files

- Renamed the command line programs and moved them to the Tools subdirectory. They are now installed by the setup.py install script.
- Added OpenType support. BASE, GDEF, GPOS, GSUB and JSTF are (almost) fully supported. The XML output is not yet final, as I'm still considering outputting certain subtables in a more human-friendly manner.
- Fixed 'kern' table to correctly accept subtables it doesn't know about, as well as interpreting Apple's definition of the 'kern' table headers correctly.
- Fixed bug where glyphnames were not calculated from 'cmap' if it was (one of the) first tables to be decompiled. More specifically: if cmap was the first to ask for a glyphID -> glyphName mapping.
- Switched XML parsers: use expat instead of xmlproc. Should be faster.
- Removed my UnicodeString object: I now require Python 2.0 or up, which has unicode support built in.
- Removed assert in glyf table: redundant data at the end of the table is now ignored instead of raising an error. Should become a warning.
- Fixed bug in hmtx/vmtx code that only occurred if all advances were equal.
- Fixed subtle bug in TT instruction disassembler.
- Couple of fixes to the 'post' table.
- Updated OS/2 table to latest spec.

1.0b1 (released 2001-08-10)
---------------------------

- Reorganized the command line interface for ttDump.py and ttCompile.py, they now behave more like "normal" command line tools, in that they accept multiple input files for batch processing.
- ttDump.py and ttCompile.py don't silently overwrite files anymore, but ask before doing so.
  Can be overridden by -f.
- Added -d option to both ttDump.py and ttCompile.py.
- Installation is now done with distutils. (Needs work for environments without compilers.)
- Updated installation instructions.
- Added some workarounds so as to handle certain buggy fonts more gracefully.
- Updated Unicode table to Unicode 3.0 (Thanks Antoine!)
- Included a Python script by Adam Twardoch that adds some useful stuff to the Windows registry.
- Moved the project to SourceForge.

1.0a6 (released 2000-03-15)
---------------------------

- Big reorganization: made ttLib a subpackage of the new fontTools package, changed several module names. Called the entire suite "FontTools".
- Added several submodules to fontTools, some new, some older.
- Added experimental CFF/GPOS/GSUB support to ttLib, read-only (but XML dumping of GPOS/GSUB is for now disabled).
- Fixed hdmx endian bug.
- Added -b option to ttCompile.py, it disables recalculation of bounding boxes, as requested by Werner Lemberg.
- Renamed tt2xml.py to ttDump.py and xml2tt.py to ttCompile.py.
- Use ".ttx" as file extension instead of ".xml".
- TTX is now the name of the XML-based *format* for TT fonts, and not just an application.

1.0a5
-----

Never released.

- More tables supported: hdmx, vhea, vmtx

1.0a3 & 1.0a4
-------------

Never released.

- fixed most portability issues
- retracted the "Euro_or_currency" change from 1.0a2: it was nonsense!

1.0a2 (released 1999-05-02)
---------------------------

- binary release for MacOS
- generates full FOND resources: including width table, PS font name info and kern table if applicable.
- added cmap format 4 support. Extra: dumps Unicode char names as XML comments!
- added cmap format 6 support
- now accepts TrueType files starting with "true" (instead of just 0x00010000 and "OTTO")
- 'glyf' table support is now complete: I added support for composite scale, xy-scale and two-by-two for the 'glyf' table. For now, component offset scale behaviour defaults to Apple-style. This only affects the (re)calculation of the glyph bounding box.
- changed "Euro" to "Euro_or_currency" in the Standard Apple Glyph order list, since we cannot tell from the 'post' table which is meant. I should probably double-check with a Unicode encoding if available. (This does not affect the output!)
Fixed bugs:

- 'hhea' table is now recalculated correctly
- fixed wrong assumption about sfnt resource names

1.0a1 (released 1999-04-27)
---------------------------

- initial binary release for MacOS

fonttools-4.51.0.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.43.0)
Root-Is-Purelib: true
Tag: py3-none-any

fonttools-4.51.0.dist-info/entry_points.txt
[console_scripts]
fonttools = fontTools.__main__:main
pyftmerge = fontTools.merge:main
pyftsubset = fontTools.subset:main
ttx = fontTools.ttx:main

fonttools-4.51.0.dist-info/top_level.txt
fontTools

fonttools-4.51.0.dist-info/RECORD
fontTools/__init__.py,sha256=wZP86I1CZDHxRjXYoLVEofzkydDkYtOMUS-EM73r0s0,183 fontTools/__main__.py,sha256=VjkGh1UD-i1zTDA1dXo1uecSs6PxHdGQ5vlCk_mCCYs,925 fontTools/afmLib.py,sha256=1MagIItOzRV4vV5kKPxeDZbPJsfxLB3wdHLFkQvl0uk,13164 fontTools/agl.py,sha256=05bm8Uq45uVWW8nPbP6xbNgmFyxQr8sWhYAiP0VSjnI,112975 fontTools/fontBuilder.py,sha256=ueiX043jDFF99mraL3awsD7JbJROohphrCBssfMaPBU,33489 fontTools/help.py,sha256=xaZTZsaLVQGv0_HGkRdEPiW6D6OwxOLY5tFHpS3aVlk,1027 fontTools/tfmLib.py,sha256=UMbkM73JXRJVS9t2B-BJc13rSjImaWBuzCoehLwHFhs,14270 fontTools/ttx.py,sha256=_Ka2OrU5EPEh1dLwQqC4TccdNbBJdjLVk-jEJnKatOI,16648 fontTools/unicode.py,sha256=ZZ7OMmWvIyV1IL1k6ioTzaRAh3tUvm6gvK7QgFbOIHY,1237 fontTools/cffLib/__init__.py,sha256=ndoZbmobnniaH9dQa4I7sIzKAjt0gpY9Z7PEdM0oM8A,114360 fontTools/cffLib/specializer.py,sha256=5wToOz7X9jIyz3SdvPmpABr5lHHDNNCC1qKUUkKTi8o,30652 fontTools/cffLib/width.py,sha256=pm1LKz28n27GHuGx4U9NYs3lnzfPBUleqbabRPq3VD8,6035 fontTools/colorLib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 fontTools/colorLib/builder.py,sha256=kmO7OuudQQb3fEOS7aLzgTDVjqS9i2xIQmk9p1uBe8A,23008 fontTools/colorLib/errors.py,sha256=CsaviiRxxrpgVX4blm7KCyK8553ljwL44xkJOeC5U7U,41 fontTools/colorLib/geometry.py,sha256=3ScySrR2YDJa7d5K5_xM5Yt1-3NCV-ry8ikYA5VwVbI,5518 fontTools/colorLib/table_builder.py,sha256=ZeltWY6n-YPiJv_hQ1iBXoEFAG70EKxZyScgsMKUFGU,7469 fontTools/colorLib/unbuilder.py,sha256=iW-E5I39WsV82K3NgCO4Cjzwm1WqzGrtypHt8epwbHM,2142 fontTools/config/__init__.py,sha256=Ti5jpozjMqp5qhnrmwNcWI6b9uvHzhZlbWXHTqVZlGI,2643 fontTools/cu2qu/__init__.py,sha256=Cuc7Uglb0nSgaraTxXY5J8bReznH5wApW0uakN7MycY,618 fontTools/cu2qu/__main__.py,sha256=IWCEMB0jcIxofHb_8NvVLmNBVRK9ZU8TQhN6TNAGrfE,83 fontTools/cu2qu/benchmark.py,sha256=M3Dix_peO6d4Nsq4SAiAO2bQGeNnZNordYAOk8hkHkw,1349 fontTools/cu2qu/cli.py,sha256=LcwNAc4rh3ufJEfZZoO6j-XTtAgg9MiQ0acZi16gFXA,6075 fontTools/cu2qu/cu2qu.py,sha256=UIFGlFq9X6Pj_NuaXg7KWIzLyR1jnx7nMCX-hFVG0SQ,16466 fontTools/cu2qu/errors.py,sha256=PyJNMy8lHDtKpfFkc0nkM8F4jNLZAC4lPQCN1Km4bpg,2441 fontTools/cu2qu/ufo.py,sha256=qZR70uWdCia19Ff8GLn5NeItscvvn69DegjDZVF4eNI,11794 fontTools/designspaceLib/__init__.py,sha256=qSce6J0xR0dOffmtODLS75UmxQV3CnseSJojt9T2Ugw,129250 fontTools/designspaceLib/__main__.py,sha256=xhtYXo1T1tsykhQDD0tcconSNYgWL5hoTBORpVDUYrc,103 fontTools/designspaceLib/split.py,sha256=FB1NuvhUO453UXveQZi9oyrW_caoCPM3RADp1rYWkDs,19239 fontTools/designspaceLib/statNames.py,sha256=lDqFxZAKSbpMuLsgbK6XtyHA5lqLyAK0t561wsSWmaM,9069 fontTools/designspaceLib/types.py,sha256=ofK65qXNADqcpl7zI72Pa5s07-cm7G41iEmLVV44-Es,5320 fontTools/encodings/MacRoman.py,sha256=4vEooUDm2gLCG8KIIDhRxm5-A64w7XrhP9cjDRr2Eo0,3576 fontTools/encodings/StandardEncoding.py,sha256=Eo3AGE8FE_p-IVYYuV097KouSsF3UrXoRRN0XyvYbrs,3581 fontTools/encodings/__init__.py,sha256=DJBWmoX_Haau7qlgmvWyfbhSzrX2qL636Rns7CG01pk,75
fontTools/encodings/codecs.py,sha256=u50ruwz9fcRsrUrRGpR17Cr55Ovn1fvCHCKrElVumDE,4721 fontTools/feaLib/__init__.py,sha256=jlIru2ghxvb1HhC5Je2BCXjFJmFQlYKpruorPoz3BvQ,213 fontTools/feaLib/__main__.py,sha256=Df2PA6LXwna98lSXiL7R4as_ZEdWCIk3egSM5w7GpvM,2240 fontTools/feaLib/ast.py,sha256=_27skibzPidJtI5lUFeVjEv5NVaNPbuz4u8oZfMuxMk,73801 fontTools/feaLib/builder.py,sha256=1ND1iQvxHQn5eLU-5bwCq-dpeHiv1JPaQmX15bJS2Gg,69242 fontTools/feaLib/error.py,sha256=Tq2dZUlCOyLfjTr3qibsT2g9t-S_JEf6bKgyNX55oCE,643 fontTools/feaLib/lexer.py,sha256=vKJiI1RVDRmYmdbuXA2NmcAOn8vDJPtiZZ7SfNGdfJ0,11117 fontTools/feaLib/location.py,sha256=JXzHqGV56EHdcq823AwA5oaK05hf_1ySWpScbo3zGC0,234 fontTools/feaLib/lookupDebugInfo.py,sha256=gVRr5-APWfT_a5-25hRuawSVX8fEvXVsOSLWkH91T2w,304 fontTools/feaLib/parser.py,sha256=wbfG_-rqrn2RWMRQMlR3-uaiM9k4_mzCVF-wPLr00rQ,98466 fontTools/feaLib/variableScalar.py,sha256=Aqx6BVUtd-A8566igKQXn_DxS7KBaes9VAkUlvkTb8c,4008 fontTools/merge/__init__.py,sha256=ndfRXVdiSNuVXMLRP3z3wRHMKwQVxtvb5gj2AfZBao4,8249 fontTools/merge/__main__.py,sha256=hDx3gfbUBO83AJKumSEhiV-xqNTJNNgK2uFjazOGTmw,94 fontTools/merge/base.py,sha256=l0G1Px98E9ZdVuFLMUBKWdtr7Jb8JX8vxcjeaDUUnzY,2389 fontTools/merge/cmap.py,sha256=_oCBnZfm5M7ebYRJnOYw5wUEICFmdR6kMUe1w6jsVuM,5545 fontTools/merge/layout.py,sha256=fkMPGPLxEdxohS3scVM4W7LmNthSz-UPyocsffe2KqE,16075 fontTools/merge/options.py,sha256=xko_1-WErcNQkirECzIOOYxSJR_bRtdQYQYOtmgccYI,2501 fontTools/merge/tables.py,sha256=DGefbqrjRAWW9nyBidvf79H-FyeREegqUy1uAj3ni0w,10545 fontTools/merge/unicode.py,sha256=kb1Jrfuoq1KUcVhhSKnflAED_wMZxXDjVwB-CI9k05Y,4273 fontTools/merge/util.py,sha256=BH3bZWNFy-Tsj1cth7aSpGVJ18YXKXqDakPn6Wzku6U,3378 fontTools/misc/__init__.py,sha256=DJBWmoX_Haau7qlgmvWyfbhSzrX2qL636Rns7CG01pk,75 fontTools/misc/arrayTools.py,sha256=jZk__GE-K9VViZE_H-LPPj0smWbKng-yfPE8BfGp8HI,11483 fontTools/misc/bezierTools.py,sha256=eiA4zu-hYd06Ys3bEgRI7VS9GnKuJhgRIPmi_1dSDlY,44731 fontTools/misc/classifyTools.py,sha256=zcg3EM4GOerBW9c063ljaLllgeeZ772EpFZjp9CdgLI,5613 fontTools/misc/cliTools.py,sha256=qCznJMLCQu3ZHQD_4ctUnr3TkfAUdkGl-UuxZUrppy0,1862 fontTools/misc/configTools.py,sha256=YXBE_vL2dMWCnK4oY3vtU15B79q82DtKp7h7XRqJc1Q,11188 fontTools/misc/cython.py,sha256=eyLcL2Bw-SSToYro8f44dkkYRlQfiFbhcza0afS-qHE,682 fontTools/misc/dictTools.py,sha256=VxjarsGJuk_wa3z29FSCtKZNCFfXtMBiNEu0RPAlpDk,2417 fontTools/misc/eexec.py,sha256=GNn2OCRvO1HbbIeDPxk9i0glO7cux_AQaoVMXhBR8y8,3331 fontTools/misc/encodingTools.py,sha256=hCv5PFfnXQJVCZA8Wyn1vr3vzLBbUuEPtGk5CzWM9RY,2073 fontTools/misc/etree.py,sha256=EPldipUNNMvbPimNX7qOUwKkbpJMY4uyElhe-wqKWkM,17079 fontTools/misc/filenames.py,sha256=MMCO3xjk1pcDc-baobcKd8IdoFPt-bcGqu8t8HUGAkI,8223 fontTools/misc/fixedTools.py,sha256=gsotTCOJLyMis13M4_jQJ8-QPob2Gl2TtNJhW6FER1I,7647 fontTools/misc/intTools.py,sha256=l6pjk4UYlXcyLtfC0DdOC5RL6UJ8ihRR0zRiYow5xA8,586 fontTools/misc/loggingTools.py,sha256=2uXks8fEnBjdgJEcxMLvD77-lbOPto3neJ86bMqV_qM,19898 fontTools/misc/macCreatorType.py,sha256=Je9jtqUr7EPbpH3QxlVl3pizoQ-1AOPMBIctHIMTM3k,1593 fontTools/misc/macRes.py,sha256=GT_pnfPw2NCvvOF86nHLAnOtZ6SMHqEuLntaplXzvHM,8579 fontTools/misc/psCharStrings.py,sha256=KAqcehT_iNMSpDEnAHh5aNSeyGJ5cErx9r8tOIECxjE,42212 fontTools/misc/psLib.py,sha256=ioIPm5x3MHkBXF2vzNkC4iVZYobrkWcyvFhmYsjOrPY,12099 fontTools/misc/psOperators.py,sha256=9SLl5PPBulLo0Xxg_dqlJMitNIBdiGKdkXhOWsNSYZE,15700 fontTools/misc/py23.py,sha256=aPVCEUz_deggwLBCeTSsccX6QgJavZqvdVtuhpzrPvA,2238 fontTools/misc/roundTools.py,sha256=1RSXZ0gyi1qW42tz6WSBMJD1FlPdtgqKfWixVN9bd78,3173 
fontTools/misc/sstruct.py,sha256=y8EpmoblqYL37Ru9B5mH9MlXTznO5JUw3YUImB5wtA4,6725 fontTools/misc/symfont.py,sha256=SXbpqWEjH8AqVwMlBo5Ozfm1y-IXlmSZkeCrR-rODKE,7069 fontTools/misc/testTools.py,sha256=P0lianKHKQ1re3IrLW5JGfoLgUXdtVJJceaNO5stA3o,6933 fontTools/misc/textTools.py,sha256=pbhr6LVhm3J-0Z4saYnJfxBDzyoiw4BR9pAgwypiOw8,3377 fontTools/misc/timeTools.py,sha256=e9h5pgzL04tBDXmCv_8eRGB4boFV8GKXlS6dq3ggEpw,2234 fontTools/misc/transform.py,sha256=mQs68bQCNozv_79QiMAhXjTdiGYUuhkapcPxT2anuvo,14473 fontTools/misc/treeTools.py,sha256=tLWkwyDHeZUPVOGNnJeD4Pn7x2bQeZetwJKaEAW2J2M,1269 fontTools/misc/vector.py,sha256=6lqZcDjAgHJFQgjzD-ULQ_PrigAMfeZKaBZmAfcC0ig,4062 fontTools/misc/visitor.py,sha256=-iX1ECuI-han4JdcZT1PUjvOlV9nlT6i2pxdv7FzMvk,5266 fontTools/misc/xmlReader.py,sha256=igut4_d13RT4WarliqVvuuPybO1uSXVeoBOeW4j0_e4,6580 fontTools/misc/xmlWriter.py,sha256=CA1c-Ov5vFTF9tT4bGk-f3yBvaX7lVmSdLPYygUqlAE,6046 fontTools/misc/plistlib/__init__.py,sha256=1HfhHPt3As6u2eRSlFfl6XdnXv_ypQImeQdWIw6wK7Y,21113 fontTools/misc/plistlib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 fontTools/mtiLib/__init__.py,sha256=vPgS5Ko7dE0GJX1aDmXSwLOaBENDUgdAAFvYVdQ4boo,46617 fontTools/mtiLib/__main__.py,sha256=gd8X89jnZOe-752k7uaR1lWoiju-2zIT5Yx35Kl0Xek,94 fontTools/otlLib/__init__.py,sha256=D2leUW-3gsUTOFcJYGC18edBYjIJ804ut4qitJYWsaQ,45 fontTools/otlLib/builder.py,sha256=BFIdTKJYRTx0__yK0yd0eiUQzdPcvRtugR4YCyq9Uf4,119473 fontTools/otlLib/error.py,sha256=cthuhBuOwZYpkTLi5gFPupUxkXkCHe-L_YgkE7N1wCI,335 fontTools/otlLib/maxContextCalc.py,sha256=l3PeR9SrhzCTwMRuRzpGxBZJzIWppk1hUqGv6b3--AE,3187 fontTools/otlLib/optimize/__init__.py,sha256=UUQRpNkHU2RczCRt-Gz7sEiYE9AQq9BHLXZEOyvsnX4,1530 fontTools/otlLib/optimize/__main__.py,sha256=BvP472kA9KxBb9RMyyehPNevAfpmgW9MfdazkUiAO3M,104 fontTools/otlLib/optimize/gpos.py,sha256=NTDLwjo90L4GiqdIdWkBEycQ7VcT7cOxxype73mFz8c,18474 fontTools/pens/__init__.py,sha256=DJBWmoX_Haau7qlgmvWyfbhSzrX2qL636Rns7CG01pk,75 fontTools/pens/areaPen.py,sha256=Y1WkmqzcC4z_bpGAR0IZUKrtHFtxKUQBmr5-64_zCOk,1472 fontTools/pens/basePen.py,sha256=eIGSKrKm6w4LLHuG6XJoQZ3eObtoKV5P6aF4gT4sk7U,17073 fontTools/pens/boundsPen.py,sha256=wE3owOQA8DfhH-zBGC3lJvnVwp-oyIt0KZrEqXbmS9I,3129 fontTools/pens/cairoPen.py,sha256=wuuOJ1qQDSt_K3zscM2nukRyHZTZMwMzzCXCirfq_qQ,592 fontTools/pens/cocoaPen.py,sha256=IJRQcAxRuVOTQ90bB_Bgjnmz7px_ST5uLF9CW-Y0KPY,612 fontTools/pens/cu2quPen.py,sha256=gMUwFUsm_-WzBlDjTMQiNnEuI2heomGeOJBX81zYXPo,13007 fontTools/pens/explicitClosingLinePen.py,sha256=kKKtdZiwaf8Cj4_ytrIDdGB2GMpPPDXm5Nwbw5WDgwU,3219 fontTools/pens/filterPen.py,sha256=kKSvLmWCW4MkCF0ciJhjTj-LdUGOQL593PFkpm5PhP8,7790 fontTools/pens/freetypePen.py,sha256=MsIjlwvd54qQoSe3fqqGm4ZyhrhQi3-9B6X1yv5_KuQ,19813 fontTools/pens/hashPointPen.py,sha256=gElrFyQoOQp3ZbpKHRWPwC61A9OgT2Js8crVUD8BQAY,3573 fontTools/pens/momentsPen.py,sha256=kiSvVWLJQPmUlus8MbMGkj8TB_QoZYL9uUwUMhyAHCQ,25685 fontTools/pens/perimeterPen.py,sha256=lr6NzrIWxi4TXBJPbcJsKzqABWfQeil2Bgm9BgUD3N4,2153 fontTools/pens/pointInsidePen.py,sha256=AloaWABNZY0KHkjIiHhElXUqJ_kr1Slf3XgwNtH7vwU,6336 fontTools/pens/pointPen.py,sha256=IA0JVDaf8_aAvjRQv3asXItxxfzhv4gEEFvrlDlCx_k,22296 fontTools/pens/qtPen.py,sha256=QRNLIry2rQl4E_7ct2tu10-qLHneQp0XV7FfaZ-tcL8,634 fontTools/pens/qu2cuPen.py,sha256=pRST43-rUpzlOP83Z_Rr0IvIQBCx6RWI6nnNaitQcLk,3985 fontTools/pens/quartzPen.py,sha256=EH482Kz_xsqYhVRovv6N_T1CXaSvOzUKPLxTaN956tU,1287 fontTools/pens/recordingPen.py,sha256=EOoMxFQkBqSSSPYkLe0lu09bNquRvLbn-XKN2k2rSSg,11975 
fontTools/pens/reportLabPen.py,sha256=kpfMfOLXt2vOQ5smPsU82ft80FpCPWJzQLl7ENOH8Ew,2066
fontTools/pens/reverseContourPen.py,sha256=oz64ZRhLAvT7DYMAwGKoLzZXQK8l81jRiYnTZkW6a-Y,4022
fontTools/pens/roundingPen.py,sha256=Q4vvG0Esq_sLNODU0TITU4F3wcXcKWo4BA7DWdDaVcM,4649
fontTools/pens/statisticsPen.py,sha256=IhiLy_pssq9hYZz5ZwOP6WSy9mX_bTvb-6PatbT47kY,9641
fontTools/pens/svgPathPen.py,sha256=LyLip9W0rirJs3YfGgdFS_f41OLjYM6EJt54gwzw49Y,8488
fontTools/pens/t2CharStringPen.py,sha256=uq9KCOxrk5TEZGYpcOG-pgkWHYCe4dMwb2hx5uYOmWA,2391
fontTools/pens/teePen.py,sha256=P1ARJOCMJ6MxK-PB1yZ-ips3CUfnadWYnQ_do6VIasQ,1290
fontTools/pens/transformPen.py,sha256=Nax1C9GflG1DFUBQBV2Vzr3d4mIOF41uO0SA3V_FJDM,3970
fontTools/pens/ttGlyphPen.py,sha256=yLtB-E5pTQR59OKVYySttWBu1xC2vR8ezSaRhIMtVwg,11870
fontTools/pens/wxPen.py,sha256=W9RRHlBWHp-CVC4Exvk3ytBmRaB4-LgJPP5Bv7o9BA0,680
fontTools/qu2cu/__init__.py,sha256=Jfm1JljXbt91w4gyvZn6jzEmVnhRx50sh2fDongrOsE,618
fontTools/qu2cu/__main__.py,sha256=bYg7TzC9ZpCBLl91hIzkcghan9ESbb_j61lw708JHmY,84
fontTools/qu2cu/benchmark.py,sha256=b-YjDXcbRr07rxwjJKUXxYhXznTyGffsavRW0pRuJwY,1453
fontTools/qu2cu/cli.py,sha256=undiO1TF_L4aJTaep1iDzYfYEAGN9KQpbXZMDPxxr3g,3713
fontTools/qu2cu/qu2cu.py,sha256=1RKhaMBBiDvo5PtkNqR5p0X2HQ4yel4TbWT8MFU6Hps,12315
fontTools/subset/__init__.py,sha256=MOmuPAP9_ucldTiSEZywvvNg0ppy74x1N3L6WnSidoY,129814
fontTools/subset/__main__.py,sha256=bhtfP2SqP4k799pxtksFgnC-XGNQDr3LcO4lc8T5e5g,95
fontTools/subset/cff.py,sha256=AG88Mj8uHtG481D-Al3QIJk3t9tjX8ZsN-Tbcz0bWo8,18864
fontTools/subset/svg.py,sha256=8dLBzQlnIt4_fOKEFDAVlKTucdHvcbCcyG9-a6UBZZ0,9384
fontTools/subset/util.py,sha256=9SXFYb5Ef9Z58uXmYPCQil8B2i3Q7aFB_1fFDFSppdU,754
fontTools/svgLib/__init__.py,sha256=IGCLwSbU8jLhq6HI2vSdPQgNs6zDUi5774TgX5MCXPY,75
fontTools/svgLib/path/__init__.py,sha256=S9TqNYjzbkboA451YQrOoFlBvfZP3YAUrjCYlX9_wc4,1954
fontTools/svgLib/path/arc.py,sha256=-f5Ym6q4tDWQ76sMNSTUTWgL_7AfgXojvBhtBS7bWwQ,5812
fontTools/svgLib/path/parser.py,sha256=OEVtWJwi0o_kDhKX2S4hfP_FAR7uEmAQ24pr6O5VvwY,10767
fontTools/svgLib/path/shapes.py,sha256=xvBUIckKyT9JLy7q_ZP50r6TjvZANyHdZP7wFDzErcI,5322
fontTools/t1Lib/__init__.py,sha256=p42y70wEIbuX0IIxZG7-b_I-gHto1VLy0gLsDvxCfkw,20865
fontTools/ttLib/__init__.py,sha256=fjOFcwbRed9b_giTgJ7FLsqeJC8ndnx327WfJztW-Tc,553
fontTools/ttLib/__main__.py,sha256=O19nJTxsS3O2umaVbrYgZOjghGb4KcppHXn2Cs1v0Z4,3406
fontTools/ttLib/macUtils.py,sha256=lj3oeFpyjV7ko_JqnluneITmAtlc119J-vwTTg2s73A,1737
fontTools/ttLib/removeOverlaps.py,sha256=0eRyIBLjwoxgKnC98IjCJOXGSodhZHSbWAq-kJOpydg,8138
fontTools/ttLib/reorderGlyphs.py,sha256=y4UAVABTMykRWIF9_BJP1B8X4JRLde5GzIOkAafofE8,10011
fontTools/ttLib/scaleUpem.py,sha256=VNUWyE8ZmbhTe8PpRFZ308a6394lkV2dYolW4Ix_cgw,12223
fontTools/ttLib/sfnt.py,sha256=KkLGD3SBKdZ1QCTEtTvmXP05-w1ZAM_8pKRfPUVg240,22829
fontTools/ttLib/standardGlyphOrder.py,sha256=7AY_fVWdtwZ4iv5uWdyKAUcbEQiSDt1lN4sqx9xXwE0,5785
fontTools/ttLib/ttCollection.py,sha256=aRph2MkBK3kd9-JCLqhJ1EN9pffN_lVX6WWmOTTewc8,3963
fontTools/ttLib/ttFont.py,sha256=55LSK8d5jOAmj_IQkiUX58I9RUidmdbmLN9_p7oQS6s,41206
fontTools/ttLib/ttGlyphSet.py,sha256=VYd22RriGS87j_BBJVpi_azKJXDwe4rILoTlapxYes0,13192
fontTools/ttLib/ttVisitor.py,sha256=_tah4C42Tv6Pm9QeLNQwwVCxqI4VNEAqYCbmThp6cvY,1025
fontTools/ttLib/woff2.py,sha256=MapZb0Hcfs7tfUIngTdhRk-n-duMLMBTccawMjtkOYo,61133
fontTools/ttLib/tables/B_A_S_E_.py,sha256=fotjQyGgXXMrLeWH-eu_R-OJ_ZepQ3GHOzQ3auhZ82Y,88
fontTools/ttLib/tables/BitmapGlyphMetrics.py,sha256=9gcGPVzsxEYnVBO7YLWfeOuht9PaCl09GmbAqDYqKi0,1769
fontTools/ttLib/tables/C_B_D_T_.py,sha256=cmxOO93VXhtS_nS6-iG9K2UUKHqTTEiFThV2wPMi0vA,3331
fontTools/ttLib/tables/C_B_L_C_.py,sha256=2Qr_xPnZn6yKMgWU5LzKfPyOu-dUK7q6XtyKAOOJl-0,188
fontTools/ttLib/tables/C_F_F_.py,sha256=jFX4ClhxD57IxfYDkDDCq2oJqSdbgAp1ghNQw5AYU7M,1443
fontTools/ttLib/tables/C_F_F__2.py,sha256=TTX4_bKYGmFGt2lihlFfKw8LLc-wIr6uE2P45Rv4qW0,425
fontTools/ttLib/tables/C_O_L_R_.py,sha256=qmexaOF-RtKSzHmekBPQIOa4Q2bmFMV3X_ytaCZhwhc,5725
fontTools/ttLib/tables/C_P_A_L_.py,sha256=4bXVL-qFKQaQhW_llYQzXZQClL24aJkEy0ms0-Bh2gk,11631
fontTools/ttLib/tables/D_S_I_G_.py,sha256=U5OCCI0sjhK5HvhNKaEonD0wucXzHXdfz5l3sb4CB8U,5327
fontTools/ttLib/tables/D__e_b_g.py,sha256=vROIV3UTxbK9eN3rmHOu1ARwBiOXL6K5ihmq0QMToJQ,443
fontTools/ttLib/tables/DefaultTable.py,sha256=cOtgkLWPY9qmOH2BSPt4c4IUSdANWTKx2rK1CTxQ4h0,1487
fontTools/ttLib/tables/E_B_D_T_.py,sha256=8iakmy4PP8BNiem9ZT_P7ysu8BkV1gWFJD94K5ThVSo,32276
fontTools/ttLib/tables/E_B_L_C_.py,sha256=rKqNd_Hxg4kJvjRLiFYS8M1GUv6aoHhLrplZRx46nBU,29761
fontTools/ttLib/tables/F_F_T_M_.py,sha256=aq9FsyfMegjxRsAWF8U2a3OpxFCPHJjNiLlC63dmqnI,1354
fontTools/ttLib/tables/F__e_a_t.py,sha256=x3ryfFJPsGVWqy10a4ulXADBnsB2JEVpyx_DuWYqy8k,5380
fontTools/ttLib/tables/G_D_E_F_.py,sha256=xN2hcW8GPMOos7dTpXJSWNJxUbGzUrnQ_2i-vxlNT_E,88
fontTools/ttLib/tables/G_M_A_P_.py,sha256=S0KyulRo88aZ4YM8OJ_l8Mf0husmlI03IlXP6aa1C1w,4515
fontTools/ttLib/tables/G_P_K_G_.py,sha256=XbfsF-qCk9ortdZycw7r6DEo94lfg6TTb3fN7HPYCuM,4441
fontTools/ttLib/tables/G_P_O_S_.py,sha256=nVSjCI8k7-8aIkzIMc7bCmd2aHeVvjwPIh2jhwn9KY4,88
fontTools/ttLib/tables/G_S_U_B_.py,sha256=-e_9Jxihz6AUSzSBCdW3tycdu4QZUsL8hZI6A7lMt9Q,88
fontTools/ttLib/tables/G__l_a_t.py,sha256=rWcOEnv9GmNIvJu7y-cpnrAUkc82527LroBIYA7NQTI,8568
fontTools/ttLib/tables/G__l_o_c.py,sha256=_MFYx8IUuJseNrS65QN-P8oq4CcGZnSxdGXKyv92Kco,2598
fontTools/ttLib/tables/H_V_A_R_.py,sha256=bdU_ktJJ2-MQ_zFn1wWTtGpZar7OTFeOEnXyrzDhts8,88
fontTools/ttLib/tables/J_S_T_F_.py,sha256=d36nOt42I5EY-7JDOulBHKtv1StpxxuvLU7gSOC6OGw,88
fontTools/ttLib/tables/L_T_S_H_.py,sha256=DG559txp9zRwe5xlhhq8_HqkOvKrgbWUBw-11nKtw-o,1826
fontTools/ttLib/tables/M_A_T_H_.py,sha256=zXSUNz98761iTREcge-YQ4LcEGCFhp1VVWAZt8B4TTQ,88
fontTools/ttLib/tables/M_E_T_A_.py,sha256=0IZysRvZur6rhe4DP7P2JnKW0O9SgbxLBHBmAJMx5vA,11784
fontTools/ttLib/tables/M_V_A_R_.py,sha256=uMresSbbzC43VL8Lou2bHjNmN3aY8wxxrV3qa6SSmR4,88
fontTools/ttLib/tables/O_S_2f_2.py,sha256=4TN66vch-0lJnr-f-ErbfWbxuDF_JRTOt-qy84oDG2k,27752
fontTools/ttLib/tables/S_I_N_G_.py,sha256=73zv425wym8w3MndveArHsp1TzM6VOQAz1gvwB9GgoQ,3112
fontTools/ttLib/tables/S_T_A_T_.py,sha256=tPbD_6x4aJACOux8bKe_sFlk0PEat7aiZn8pnXoUGws,88
fontTools/ttLib/tables/S_V_G_.py,sha256=8h8arIl9gedLB3GRRNF8V0x2pq1GikF7If9e_srB69I,7463
fontTools/ttLib/tables/S__i_l_f.py,sha256=5hZ1ze12-tRyYIu-hEewRlgMWiuGHNf40om7Rs369_Q,34901
fontTools/ttLib/tables/S__i_l_l.py,sha256=KvjK_vrh_YyPHtYwLyrHLx33gcTYg5lBnvUYie6b06M,3104
fontTools/ttLib/tables/T_S_I_B_.py,sha256=CMcquVV86ug63Zk_yTB37DKqO91FZW14WtzwBI2aPjY,86
fontTools/ttLib/tables/T_S_I_C_.py,sha256=TjDKgGdFEaL4Affo9MTInuVKbYUHMa0pJX18pzgYxT0,88
fontTools/ttLib/tables/T_S_I_D_.py,sha256=OP_tHge02Fs7Y5lnVrgUGfr4FdIu-iv3GVtMEyH3Nrw,86
fontTools/ttLib/tables/T_S_I_J_.py,sha256=soJ3cf52aXLQTqvhQV2bHzyRSh6bsxxvZcpAV4Z9tlc,86
fontTools/ttLib/tables/T_S_I_P_.py,sha256=SvDvtRhxiC96WvZxNb2RoyTf0IXjeVMF_UP42ZD_vwU,86
fontTools/ttLib/tables/T_S_I_S_.py,sha256=IHJsyWONSgbg9hm5VnkCeq70SQcwnNJZZO_dBtJGZFc,86
fontTools/ttLib/tables/T_S_I_V_.py,sha256=Pqr8g0zrgCZl2sSJlxE5AYXazlZE29o1BO8oMVblBUs,655
fontTools/ttLib/tables/T_S_I__0.py,sha256=c0F4nKBKTeURqxCFv3nwxCu9Dl0mh7wr0PhOrLKMjho,2043
fontTools/ttLib/tables/T_S_I__1.py,sha256=N-BoLR5WWZv8tglokn5WZv8w_52jzKDG8jiZn5bS__k,6982
fontTools/ttLib/tables/T_S_I__2.py,sha256=ZV39h3SKtVSxKF9dKkI4sC0X5oXLkQDSPCcOeBTxUTM,420
fontTools/ttLib/tables/T_S_I__3.py,sha256=wQnwccPX3IaxGjzCdJHwtLh2ZqSsoAS-vWjhdI2h5dQ,467
fontTools/ttLib/tables/T_S_I__5.py,sha256=jB-P8RMFC3KOGdtTQH5uzvqEJDIWhRlDFsuvAix0cl0,1510
fontTools/ttLib/tables/T_T_F_A_.py,sha256=7wiKnyzrHiLgdtz6klG02flh8S7hm7GKarif7lw3IMc,81
fontTools/ttLib/tables/TupleVariation.py,sha256=alTazkxRcDtySxQFGeI4vnWGvKa_BFQ6otLetSF0E54,29507
fontTools/ttLib/tables/V_D_M_X_.py,sha256=dqE3G2Hg4ByQNteceOMctgFu2Er_DHh4_vOlAAaP5nM,10189
fontTools/ttLib/tables/V_O_R_G_.py,sha256=XasThyPjPNah6Yn0TCFVv9H5kmYDx5FIMaH8B9sA2oU,5762
fontTools/ttLib/tables/V_V_A_R_.py,sha256=X9C_r2HiSnI2mYqUQ93yK4zLpweRzobJ0Kh1J2lTsAw,88
fontTools/ttLib/tables/__init__.py,sha256=zCtd7rcQS5cIoj0cqePaZ6oTcTPCwnzDS90_VIIEwQ0,2601
fontTools/ttLib/tables/_a_n_k_r.py,sha256=DhIUAWnvXZZdC1jlh9ubcsobFahdtlJMsk7v_2s-WaM,462
fontTools/ttLib/tables/_a_v_a_r.py,sha256=gQi_aDfC-MXOgXDwOV-f_SsFZGTX8d6cFoYtLwI30wI,5376
fontTools/ttLib/tables/_b_s_l_n.py,sha256=D1tRo8TDAUxeCqVWsTma9u2VxRzxUkCpF84Lv_hy4rU,170
fontTools/ttLib/tables/_c_i_d_g.py,sha256=A6llfYvsJQl0Mj6fnrRxUGXUlBkyEowo1J2euUulHM4,787
fontTools/ttLib/tables/_c_m_a_p.py,sha256=OP0WuHxErqVIDEuGnJ20lel04jd9JeAYIYTENqKK--Y,61643
fontTools/ttLib/tables/_c_v_a_r.py,sha256=Nlf8etrchBixD7qxFgxuDZ51VHA0XtsHfABDSgPG2RU,3307
fontTools/ttLib/tables/_c_v_t.py,sha256=E_mDVniDspGjbBQk9CDEm8y3LJ5FbnHxZHRGbq-okHA,1361
fontTools/ttLib/tables/_f_e_a_t.py,sha256=cshl7jgxj2RgzE8kECCkQVAW2ibJqgKLpZdT1PwyvuM,560
fontTools/ttLib/tables/_f_p_g_m.py,sha256=-a5WYucI482KQ65rmbl8YwsD4q9BRyDIunJ_9MYAeyc,1170
fontTools/ttLib/tables/_f_v_a_r.py,sha256=9KKJjFu--gQavQLKwuDLMFi_T5SlfivlE6I2LmzIgD0,8479
fontTools/ttLib/tables/_g_a_s_p.py,sha256=Sp31uXdZyQO2Bbp4Qh5QBu75TvnDmxNQYhfMXf6PkCg,1916
fontTools/ttLib/tables/_g_c_i_d.py,sha256=4VWq2u6c21ZOQ5_EJ5EwtZXC-zDz6SOPYwDDRZWRczA,170
fontTools/ttLib/tables/_g_l_y_f.py,sha256=vsEjxw_3Q8j0TngkTO7veaJx6cJQIRvE1lW2fvE6n68,98623
fontTools/ttLib/tables/_g_v_a_r.py,sha256=d_2scSh_71bSwaVHdEAk05py7KJA5BQMiszOBnMAvdw,10236
fontTools/ttLib/tables/_h_d_m_x.py,sha256=BOadCwbQhtiwQZoduvkvt6rtevP7BQiyd5KYnfjE0Cc,4024
fontTools/ttLib/tables/_h_e_a_d.py,sha256=cWH7gPQdb7SoWH88eyHHv0HeJ-k7xyXWjorPVTMIMGs,4745
fontTools/ttLib/tables/_h_h_e_a.py,sha256=YSMaTvNp3CD4G6WgGLmYdJGv_TKghKkT-IHW5Gw0iio,4434
fontTools/ttLib/tables/_h_m_t_x.py,sha256=DEcruWWtBYNW6sHtuv17snMCUYkvdaVtx_lrZLLhBfc,5767
fontTools/ttLib/tables/_k_e_r_n.py,sha256=SXkBnwz39gd6YHrQizGqz1orFEETp02vLgxzJSCNdYQ,10437
fontTools/ttLib/tables/_l_c_a_r.py,sha256=SKmQ65spClbLnsYMDoqecsUOWWNyBDsFWut-Y6ahVhk,88
fontTools/ttLib/tables/_l_o_c_a.py,sha256=aAcaTyf4ntk-PGY_Ko_K3IVAqbNLKwNc_FmRYIPWPG4,1994
fontTools/ttLib/tables/_l_t_a_g.py,sha256=L1ekoPzh4pMdWGRr-cdjL3M2asf4CqeUHq7zh4wvwrw,2274
fontTools/ttLib/tables/_m_a_x_p.py,sha256=Xol-ByrHp_eysC7Kvc7c0KLHWEQZ9rY2L8L2mFGQ1wk,5056
fontTools/ttLib/tables/_m_e_t_a.py,sha256=MslEJ7E0oO-JNHyAhtkRsBCBp0kK4OXfAgRqtRF9GDA,3651
fontTools/ttLib/tables/_m_o_r_t.py,sha256=2p7PzPGzdOtFhg-Fxvdh0PO4yRs6_z_WjQegexeZCsw,170
fontTools/ttLib/tables/_m_o_r_x.py,sha256=UJhBbA3mgVQO1oGmu_2bNXUwQreVSztG85F9k7DpmiQ,170
fontTools/ttLib/tables/_n_a_m_e.py,sha256=djXx4Tzw4LOGTiIoCz72mj-jErlYG-qWwKHL1POX58I,40729
fontTools/ttLib/tables/_o_p_b_d.py,sha256=t3eqUkZPyaQbahEmKaqp7brDNbt4MQje2Vq1jBu-fEc,170
fontTools/ttLib/tables/_p_o_s_t.py,sha256=DusC5HkI4eJw9jw9idb0GA1Xr9YuhQMnmsz4GM36kVI,11284
fontTools/ttLib/tables/_p_r_e_p.py,sha256=97rDk0OiGoOD-foAIzqzYM1IKhB4gQuWyBrkH1PVvP0,115
fontTools/ttLib/tables/_p_r_o_p.py,sha256=3JHFloIJwg9n4dzoe4KLobHc75oJh6DLNe51sakfz8E,170
fontTools/ttLib/tables/_s_b_i_x.py,sha256=eHzNG4I8732aeW7iUNEEdYsxgsHT9sTtbaD2vvAxxR8,4443
fontTools/ttLib/tables/_t_r_a_k.py,sha256=fZV1pQrAilSNc0Yd3x0XoIGbqlNoDv67LB2gb_CejMo,11069
fontTools/ttLib/tables/_v_h_e_a.py,sha256=zHokAcH7CQ4tZPQAGmdTuv0_X-FHwyLWea1f9aFb1Gg,4130
fontTools/ttLib/tables/_v_m_t_x.py,sha256=oUrskRNAf3FLIZaYLuk03np_IsIWBGUWbMFcdjU3Sys,229
fontTools/ttLib/tables/asciiTable.py,sha256=4c69jsAirUnDEpylf9CYBoCKTzwbmfbtUAOrtPnpHjY,637
fontTools/ttLib/tables/grUtils.py,sha256=hcOJ5oJPOd2uJWnWA7qwR7AfL37YZ5zUT7g8o5BBV80,2270
fontTools/ttLib/tables/otBase.py,sha256=0Aik3BCMmUBUwsPMKHhAAHKYqXSaL7dIUuYP2qd1D0k,53158
fontTools/ttLib/tables/otConverters.py,sha256=NHiRZz8KQtKY-6JCz5wRHC5UXxEAhvLD2wJmpop6mP8,69318
fontTools/ttLib/tables/otData.py,sha256=u4MduMik-MnGDZeqVTVQaBRerjeHKQXRdmXaHsGqmMY,192591
fontTools/ttLib/tables/otTables.py,sha256=87FTcd-g0YT1muO0nat93FjN66tfNG3x1ucb3WVcPcQ,83289
fontTools/ttLib/tables/otTraverse.py,sha256=oTr7nA7u7kEltLAhl4Kfl1RPD8O2_bKaoXa5l0hkRVA,5497
fontTools/ttLib/tables/sbixGlyph.py,sha256=tjEUPVRfx6gr5yme8UytGTtVrimKN5qmbzT1GZPjXiM,5796
fontTools/ttLib/tables/sbixStrike.py,sha256=gFyOlhRIGnd59y0SrhtsT2Ce4L3yaBrLoFJ_dK9u9mQ,6663
fontTools/ttLib/tables/table_API_readme.txt,sha256=eZlRTLUkLzc_9Ot3pdfhyMb3ahU0_Iipx0vSbzOVGy8,2748
fontTools/ttLib/tables/ttProgram.py,sha256=tgtxgd-EnOq-2PUlYEihp-6NHu_7HnE5rxeSAtmXOtU,35888
fontTools/ufoLib/__init__.py,sha256=FcF400e77rehKcGfEt15_CAWgfSc0GF2Er5NPZubZYg,93665
fontTools/ufoLib/converters.py,sha256=EjuBkQxFltzeb-qnt2jzwieJH92f9ybcdZwAvQJi_Kw,10558
fontTools/ufoLib/errors.py,sha256=UULZ4h1i_Lb9lywjScgC6N-wC4yyPceTSin0BebbhJk,584
fontTools/ufoLib/etree.py,sha256=YQpCsRlLv0zfZUK8_i9cNFKBvyq1Gyy6HQbKyPLCoEY,224
fontTools/ufoLib/filenames.py,sha256=Trm8k9AzXYYaYo0VwAgLJKCtWgsA1QjBlirmgXdZhjg,7562
fontTools/ufoLib/glifLib.py,sha256=wpoSz624xqocPJbdzzElyCAgmEOjZVQeGr2KiZtHvAA,72053
fontTools/ufoLib/kerning.py,sha256=0jPFd7mti884yvPjvYcU8lAWDwvVsNOObeQvVmPRJ3k,2973
fontTools/ufoLib/plistlib.py,sha256=IpMh2FH9-6dxcvjSK4YR7L01HTIP1_RnQ8mWliyds1E,1499
fontTools/ufoLib/pointPen.py,sha256=QGg6b_UeosZodcqqfAIPyAPUbfT7KgCxDwYfSR0GlCI,233
fontTools/ufoLib/utils.py,sha256=8aqNHdFUd_imnawCQFY3UaXpF_s_4sHeinH0lqELTos,1893
fontTools/ufoLib/validators.py,sha256=zIcp2weAYLOJBCvxbqBqAy34TaJrqpAlXKshJIkdhWI,30805
fontTools/unicodedata/Blocks.py,sha256=8sfrqmUZYlWWwy2tnh7d9DBE0RiGtZmMa5H4ZBVfPCU,31360
fontTools/unicodedata/OTTags.py,sha256=wOPpbMsNcp_gdvPFeITtgVMnTN8TJSNAsVEdu_nuPXE,1196
fontTools/unicodedata/ScriptExtensions.py,sha256=mrNVubRG5A6K8ARPoUVuW9GY9G5_m4b4aRg3w2_gOO4,19443
fontTools/unicodedata/Scripts.py,sha256=8qddX0zmRy4eqQoAFVtum3RiD2EwQqcN_SxHZ43KxsQ,126086
fontTools/unicodedata/__init__.py,sha256=QWboow5NU0P6sTAezY__zE-eL6JFvBpwEReeH2dIOb4,8816
fontTools/varLib/__init__.py,sha256=_tYAD-r5wrSuk5nUNsjyac3Yk4vwXosCBdIW8SZvz2c,53401
fontTools/varLib/__main__.py,sha256=wbdYC5bPjWCxA0I4SKcLO88gl-UMtsYS8MxdW9ySTkY,95
fontTools/varLib/avar.py,sha256=wQ9jbQblv8-5RslOJzJigV3dAXC-eaQdG5I2MF7w9TU,1895
fontTools/varLib/avarPlanner.py,sha256=uLMGsL6cBbEMq5YItwABG_vXlXV3bxquM93WGDJ1brA,27358
fontTools/varLib/builder.py,sha256=gIcF3wydIbSQG7xXalaxMRiHYw48Lq6T3ObAfDgmFRM,4859
fontTools/varLib/cff.py,sha256=ZnIbuyYjq_iOL2qbGF-699IZ2wIgqNi68cErtWAA8co,26122
fontTools/varLib/errors.py,sha256=dMo8eGj76I7H4hrBEiNbYrGs2J1K1SwdsUyTHpkVOrQ,6934
fontTools/varLib/featureVars.py,sha256=BCOBGjGUv2Rw_z0rlVi1ZYkTDcCMh0LyAUzDVJ2PYm4,25448
fontTools/varLib/interpolatable.py,sha256=QucGcQQQIglJhk4oNkVUJlYESWCvIz-Bo0dERRQSKO4,41959
fontTools/varLib/interpolatableHelpers.py,sha256=TN0nRv_CjuSh8hurgz4LQ5E6-HBQECWTLjTFKnidxsg,10810
fontTools/varLib/interpolatablePlot.py,sha256=w393P6mGLRhYkIjSxMww3qyoYxAUZzCXlmPBbI_84C0,44375
fontTools/varLib/interpolatableTestContourOrder.py,sha256=EmJ2jp4sHuSM5P-seYvOLk0HLdWyPOHeVWRKIGIKXx4,3033
fontTools/varLib/interpolatableTestStartingPoint.py,sha256=Hq3NtC8I-O5dFnFoBQ6qVvLCzlPWEZqOSLP15QeuSAw,4231
fontTools/varLib/interpolate_layout.py,sha256=22VjGZuV2YiAe2MpdTf0xPVz1x2G84bcOL0vOeBpGQM,3689
fontTools/varLib/iup.py,sha256=bUk3O1QoFM8k_QEleHruT0biPoauX8AUJorbRuO21Vo,14675
fontTools/varLib/merger.py,sha256=E59oli4AwqWZ-FgnuStMSBvsB-FHe-55esXTYUqGeJ8,60802
fontTools/varLib/models.py,sha256=SQI0ipNdl1WX8ysszet58UPtSRAAr9R0K40800AdXQo,21961
fontTools/varLib/mutator.py,sha256=P1ukWRojb1p7kcfa37EPny_GOky-FkZy7wcAHHeF9jA,19226
fontTools/varLib/mvar.py,sha256=LTV77vH_3Ecg_qKBO5xQzjLOlJir_ppEr7mPVZRgad8,2449
fontTools/varLib/plot.py,sha256=NoSZkJ5ndxNcDvJIvd5pQ9_jX6X1oM1K2G_tR4sdPVs,7494
fontTools/varLib/stat.py,sha256=pNtU3Jebm8Gr5umrbF5xGj5yJQciFwSFpfePOcg37xY,4535
fontTools/varLib/varStore.py,sha256=gBR-gvddip1qukEcGQdI-Nra5eKXGHowTsVZuZmZ2B4,23689
fontTools/varLib/instancer/__init__.py,sha256=klnPkky_dlHCcJNcip9K7XC_U3gypaDSPqZjsxeG25s,58569
fontTools/varLib/instancer/__main__.py,sha256=zfULwcP01FhplS1IlcMgNQnLxk5RVfmOuinWjqeid-g,104
fontTools/varLib/instancer/featureVars.py,sha256=oPqSlnHLMDTtOsmQMi6gkzLox7ymCrqlRAkvC_EJ4bc,7110
fontTools/varLib/instancer/names.py,sha256=IPRqel_M8zVU0jl30WsfgufxUm9PBBQDQCY3VHapeHc,14950
fontTools/varLib/instancer/solver.py,sha256=uMePwX0BVT5F94kUvDglsI4_F0nEH67F7RFuJ6tQwQ0,11002
fontTools/voltLib/__init__.py,sha256=ZZ1AsTx1VlDn40Kupce-fM3meOWugy3RZraBW9LG-9M,151
fontTools/voltLib/ast.py,sha256=sioOeSazmC8PxRMRql33I64JaCflu55UUZcikm9mwIY,13226
fontTools/voltLib/error.py,sha256=phcQOQj-xOspCXu9hBJQRhSOBDzxHRgZd3fWQOFNJzw,395
fontTools/voltLib/lexer.py,sha256=OvuETOSvlS6v7iCVeJ3IdH2Cg71n3OJoEyiB3-h6vhE,3368
fontTools/voltLib/parser.py,sha256=wBSUrjLT3fSPv9Mjx6_ULIf8IcGlwjtb4Auxjh5wqnc,24916
fontTools/voltLib/voltToFea.py,sha256=yi5cytZjCJbsKdJuM5k6HWSlqE_ZUv0l1rzp8K1vo3A,28465
fonttools-4.51.0.data/data/share/man/man1/ttx.1,sha256=cLbm_pOOj1C76T2QXvDxzwDj9gk-GTd5RztvTMsouFw,5377
fonttools-4.51.0.dist-info/LICENSE,sha256=Z4cgj4P2Wcy8IiOy_elS_6b36KymLxqKK_W8UbsbI4M,1072
fonttools-4.51.0.dist-info/METADATA,sha256=lr4M1q7eNqmK2TlHe3pDxxFGBChT9m2Y6AEpJ_LfRQM,159505
fonttools-4.51.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
fonttools-4.51.0.dist-info/entry_points.txt,sha256=8kVHddxfFWA44FSD4mBpmC-4uCynQnkoz_9aNJb227Y,147
fonttools-4.51.0.dist-info/top_level.txt,sha256=rRgRylrXzekqWOsrhygzib12pQ7WILf7UGjqEwkIFDM,10
fonttools-4.51.0.dist-info/RECORD,,
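# Editor's note: the listing above is the wheel's dist-info RECORD
# manifest. Each row has the form "path,algorithm=digest,size", where the
# digest is the urlsafe-base64 SHA-256 of the file with "=" padding
# stripped (per PEP 376 and the wheel spec), and RECORD lists itself with
# empty hash/size fields. The sketch below is an illustrative,
# hypothetical helper -- it is NOT part of fontTools -- showing how one
# such row could be checked against an unpacked installation tree.
import base64
import csv
import hashlib
import io


def verify_record_entry(row: str, root: str = ".") -> bool:
    """Check one RECORD row against the file on disk under `root`."""
    # RECORD is CSV, so paths containing commas would arrive quoted.
    path, hash_spec, size = next(csv.reader(io.StringIO(row)))
    if not hash_spec:  # the RECORD file's own row has no hash or size
        return True
    algo, _, expected = hash_spec.partition("=")
    with open(f"{root}/{path}", "rb") as f:
        data = f.read()
    # Re-encode the digest the way RECORD stores it: urlsafe base64,
    # "=" padding removed.
    digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest())
    return digest.rstrip(b"=").decode() == expected and len(data) == int(size)


# Example usage against one row from the manifest above (assuming the
# wheel has been unpacked into `root`):
# verify_record_entry(
#     "fontTools/subset/util.py,sha256=9SXFYb5Ef9Z58uXmYPCQil8B2i3Q7aFB_1fFDFSppdU,754",
#     root="site-packages",
# )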