# fontTools/__init__.py
import logging
from fontTools.misc.loggingTools import configLogger
log = logging.getLogger(__name__)
version = __version__ = "4.51.0"
__all__ = ["version", "log", "configLogger"]

# fontTools/__main__.py
import sys
def main(args=None):
if args is None:
args = sys.argv[1:]
# TODO Handle library-wide options. E.g.:
# --unicodedata
# --verbose / other logging stuff
# TODO Allow a way to run arbitrary modules? Useful for setting
# library-wide options and calling another library. E.g.:
#
# $ fonttools --unicodedata=... fontmake ...
#
# This allows for a git-like command where third-party commands
# can be added. Should we just try importing the fonttools
# module first and try without if it fails?
if len(sys.argv) < 2:
sys.argv.append("help")
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
sys.argv[1] = "help"
mod = "fontTools." + sys.argv[1]
sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
del sys.argv[0]
import runpy
runpy.run_module(mod, run_name="__main__")
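# Illustrative example of the dispatch above (assuming the fontTools.subset
# module, which ships with fontTools): running "fonttools subset ..." sets
# mod to "fontTools.subset" and executes it as a script, with argv rewritten
# so the subcommand reports itself as "<prog> subset".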
if __name__ == "__main__":
sys.exit(main())

# fontTools/afmLib.py
"""Module for reading and writing AFM (Adobe Font Metrics) files.
Note that this has been designed to read in AFM files generated by Fontographer
and has not been tested on many other files. In particular, it does not
implement the whole Adobe AFM specification [#f1]_, but it should read most
"common" AFM files.
Here is an example of using `afmLib` to read, modify and write an AFM file:
>>> from fontTools.afmLib import AFM
>>> f = AFM("Tests/afmLib/data/TestAFM.afm")
>>>
>>> # Accessing a pair gets you the kern value
>>> f[("V","A")]
-60
>>>
>>> # Accessing a glyph name gets you metrics
>>> f["A"]
(65, 668, (8, -25, 660, 666))
>>> # (charnum, width, bounding box)
>>>
>>> # Accessing an attribute gets you metadata
>>> f.FontName
'TestFont-Regular'
>>> f.FamilyName
'TestFont'
>>> f.Weight
'Regular'
>>> f.XHeight
500
>>> f.Ascender
750
>>>
>>> # Attributes and items can also be set
>>> f[("A","V")] = -150 # Tighten kerning
>>> f.FontName = "TestFont Squished"
>>>
>>> # And the font written out again (remove the # in front)
>>> #f.write("testfont-squished.afm")
.. rubric:: Footnotes
.. [#f1] `Adobe Technote 5004 <https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5004.AFM_Spec.pdf>`_,
Adobe Font Metrics File Format Specification.
"""
import re
# every single line starts with a "word"
identifierRE = re.compile(r"^([A-Za-z]+).*")
# regular expression to parse char lines
charRE = re.compile(
r"(-?\d+)" # charnum
r"\s*;\s*WX\s+" # ; WX
r"(-?\d+)" # width
r"\s*;\s*N\s+" # ; N
r"([.A-Za-z0-9_]+)" # charname
r"\s*;\s*B\s+" # ; B
r"(-?\d+)" # left
r"\s+"
r"(-?\d+)" # bottom
r"\s+"
r"(-?\d+)" # right
r"\s+"
r"(-?\d+)" # top
r"\s*;\s*" # ;
)
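# Illustrative input for charRE (values made up, not from a particular font):
# it is applied to the remainder of a char metrics line after the leading "C ",
# e.g. "32 ; WX 600 ; N space ; B 0 0 0 0 ;".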
# regular expression to parse kerning lines
kernRE = re.compile(
r"([.A-Za-z0-9_]+)" # leftchar
r"\s+"
r"([.A-Za-z0-9_]+)" # rightchar
r"\s+"
r"(-?\d+)" # value
r"\s*"
)
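# Illustrative input for kernRE (value made up): it is applied to the
# remainder of a kerning line after the leading "KPX ", e.g. "A V -80".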
# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
r"([.A-Za-z0-9_]+)" # char name
r"\s+"
r"(\d+)" # number of parts
r"\s*;\s*"
)
componentRE = re.compile(
r"PCC\s+" # PPC
r"([.A-Za-z0-9_]+)" # base char name
r"\s+"
r"(-?\d+)" # x offset
r"\s+"
r"(-?\d+)" # y offset
r"\s*;\s*"
)
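# componentRE is applied repeatedly to the tail of a composite line; for the
# example above it matches "PCC A 0 0 ;" and then "PCC acute 182 211 ;".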
preferredAttributeOrder = [
"FontName",
"FullName",
"FamilyName",
"Weight",
"ItalicAngle",
"IsFixedPitch",
"FontBBox",
"UnderlinePosition",
"UnderlineThickness",
"Version",
"Notice",
"EncodingScheme",
"CapHeight",
"XHeight",
"Ascender",
"Descender",
]
class error(Exception):
pass
class AFM(object):
_attrs = None
_keywords = [
"StartFontMetrics",
"EndFontMetrics",
"StartCharMetrics",
"EndCharMetrics",
"StartKernData",
"StartKernPairs",
"EndKernPairs",
"EndKernData",
"StartComposites",
"EndComposites",
]
def __init__(self, path=None):
"""AFM file reader.
Instantiating an object with a path name will cause the file to be opened,
read, and parsed. Alternatively the path can be left unspecified, and a
file can be parsed later with the :meth:`read` method."""
self._attrs = {}
self._chars = {}
self._kerning = {}
self._index = {}
self._comments = []
self._composites = {}
if path is not None:
self.read(path)
def read(self, path):
"""Opens, reads and parses a file."""
lines = readlines(path)
for line in lines:
if not line.strip():
continue
m = identifierRE.match(line)
if m is None:
raise error("syntax error in AFM file: " + repr(line))
pos = m.regs[1][1]
word = line[:pos]
rest = line[pos:].strip()
if word in self._keywords:
continue
if word == "C":
self.parsechar(rest)
elif word == "KPX":
self.parsekernpair(rest)
elif word == "CC":
self.parsecomposite(rest)
else:
self.parseattr(word, rest)
def parsechar(self, rest):
m = charRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
charname = things[2]
del things[2]
charnum, width, l, b, r, t = (int(thing) for thing in things)
self._chars[charname] = charnum, width, (l, b, r, t)
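# For example, the glyph "A" from the module docstring ends up stored as
# self._chars["A"] = (65, 668, (8, -25, 660, 666)).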
def parsekernpair(self, rest):
m = kernRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
leftchar, rightchar, value = things
value = int(value)
self._kerning[(leftchar, rightchar)] = value
def parseattr(self, word, rest):
if word == "FontBBox":
l, b, r, t = [int(thing) for thing in rest.split()]
self._attrs[word] = l, b, r, t
elif word == "Comment":
self._comments.append(rest)
else:
try:
value = int(rest)
except (ValueError, OverflowError):
self._attrs[word] = rest
else:
self._attrs[word] = value
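# For example, "Ascender 750" stores self._attrs["Ascender"] = 750 (an int),
# while "FontName TestFont-Regular" stores the value as a plain string.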
def parsecomposite(self, rest):
m = compositeRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
charname = m.group(1)
ncomponents = int(m.group(2))
rest = rest[m.regs[0][1] :]
components = []
while True:
m = componentRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
basechar = m.group(1)
xoffset = int(m.group(2))
yoffset = int(m.group(3))
components.append((basechar, xoffset, yoffset))
rest = rest[m.regs[0][1] :]
if not rest:
break
assert len(components) == ncomponents
self._composites[charname] = components
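# For the example composite line shown above, this stores
# self._composites["Aacute"] = [("A", 0, 0), ("acute", 182, 211)].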
def write(self, path, sep="\r"):
"""Writes out an AFM font to the given path."""
import time
lines = [
"StartFontMetrics 2.0",
"Comment Generated by afmLib; at %s"
% (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
]
# write comments, assuming (possibly wrongly!) they should
# all appear at the top
for comment in self._comments:
lines.append("Comment " + comment)
# write attributes, first the ones we know about, in
# a preferred order
attrs = self._attrs
for attr in preferredAttributeOrder:
if attr in attrs:
value = attrs[attr]
if attr == "FontBBox":
value = "%s %s %s %s" % value
lines.append(attr + " " + str(value))
# then write the attributes we don't know about,
# in alphabetical order
items = sorted(attrs.items())
for attr, value in items:
if attr in preferredAttributeOrder:
continue
lines.append(attr + " " + str(value))
# write char metrics
lines.append("StartCharMetrics " + repr(len(self._chars)))
items = [
(charnum, (charname, width, box))
for charname, (charnum, width, box) in self._chars.items()
]
def myKey(a):
"""Custom key function to make sure unencoded chars (-1)
end up at the end of the list after sorting."""
if a[0] == -1:
a = (0xFFFF,) + a[1:] # 0xffff is an arbitrary large number
return a
items.sort(key=myKey)
for charnum, (charname, width, (l, b, r, t)) in items:
lines.append(
"C %d ; WX %d ; N %s ; B %d %d %d %d ;"
% (charnum, width, charname, l, b, r, t)
)
lines.append("EndCharMetrics")
# write kerning info
lines.append("StartKernData")
lines.append("StartKernPairs " + repr(len(self._kerning)))
items = sorted(self._kerning.items())
for (leftchar, rightchar), value in items:
lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
lines.append("EndKernPairs")
lines.append("EndKernData")
if self._composites:
composites = sorted(self._composites.items())
lines.append("StartComposites %s" % len(self._composites))
for charname, components in composites:
line = "CC %s %s ;" % (charname, len(components))
for basechar, xoffset, yoffset in components:
line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
lines.append(line)
lines.append("EndComposites")
lines.append("EndFontMetrics")
writelines(path, lines, sep)
def has_kernpair(self, pair):
"""Returns `True` if the given glyph pair (specified as a tuple) exists
in the kerning dictionary."""
return pair in self._kerning
def kernpairs(self):
"""Returns a list of all kern pairs in the kerning dictionary."""
return list(self._kerning.keys())
def has_char(self, char):
"""Returns `True` if the given glyph exists in the font."""
return char in self._chars
def chars(self):
"""Returns a list of all glyph names in the font."""
return list(self._chars.keys())
def comments(self):
"""Returns all comments from the file."""
return self._comments
def addComment(self, comment):
"""Adds a new comment to the file."""
self._comments.append(comment)
def addComposite(self, glyphName, components):
"""Specifies that the glyph `glyphName` is made up of the given components.
The components list should be of the following form::
[
(glyphname, xOffset, yOffset),
...
]
"""
self._composites[glyphName] = components
def __getattr__(self, attr):
if attr in self._attrs:
return self._attrs[attr]
else:
raise AttributeError(attr)
def __setattr__(self, attr, value):
# all attrs *not* starting with "_" are considered to be AFM keywords
if attr[:1] == "_":
self.__dict__[attr] = value
else:
self._attrs[attr] = value
def __delattr__(self, attr):
# all attrs *not* starting with "_" are considered to be AFM keywords
if attr[:1] == "_":
try:
del self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
else:
try:
del self._attrs[attr]
except KeyError:
raise AttributeError(attr)
def __getitem__(self, key):
if isinstance(key, tuple):
# key is a tuple, return the kernpair
return self._kerning[key]
else:
# return the metrics instead
return self._chars[key]
def __setitem__(self, key, value):
if isinstance(key, tuple):
# key is a tuple, set kernpair
self._kerning[key] = value
else:
# set char metrics
self._chars[key] = value
def __delitem__(self, key):
if isinstance(key, tuple):
# key is a tuple, del kernpair
del self._kerning[key]
else:
# del char metrics
del self._chars[key]
def __repr__(self):
if hasattr(self, "FullName"):
return "<AFM object for %s>" % self.FullName
else:
return "<AFM object at %x>" % id(self)
def readlines(path):
with open(path, "r", encoding="ascii") as f:
data = f.read()
return data.splitlines()
def writelines(path, lines, sep="\r"):
with open(path, "w", encoding="ascii", newline=sep) as f:
f.write("\n".join(lines) + "\n")
if __name__ == "__main__":
import EasyDialogs
path = EasyDialogs.AskFileForOpen()
if path:
afm = AFM(path)
char = "A"
if afm.has_char(char):
print(afm[char]) # print charnum, width and boundingbox
pair = ("A", "V")
if afm.has_kernpair(pair):
print(afm[pair]) # print kerning value for pair
print(afm.Version) # various other afm entries have become attributes
print(afm.Weight)
# afm.comments() returns a list of all Comment lines found in the AFM
print(afm.comments())
# print afm.chars()
# print afm.kernpairs()
print(afm)
afm.write(path + ".muck")

# fontTools/agl.py
# -*- coding: utf-8 -*-
# The tables below are taken from
# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/glyphlist.txt
# and
# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/aglfn.txt
"""
Interface to the Adobe Glyph List
This module exists to convert glyph names from the Adobe Glyph List
to their Unicode equivalents. Example usage:
>>> from fontTools.agl import toUnicode
>>> toUnicode("nahiragana")
'な'
It also contains two dictionaries, ``UV2AGL`` and ``AGL2UV``, which map from
Unicode codepoints to AGL names and vice versa:
>>> import fontTools
>>> fontTools.agl.UV2AGL[ord("?")]
'question'
>>> fontTools.agl.AGL2UV["wcircumflex"]
373
This is used by fontTools when it has to construct glyph names for a font which
doesn't include any (e.g. format 3.0 post tables).
"""
from fontTools.misc.textTools import tostr
import re
_aglText = """\
# -----------------------------------------------------------
# Copyright 2002-2019 Adobe (http://www.adobe.com/).
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# Neither the name of Adobe nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------
# Name: Adobe Glyph List
# Table version: 2.0
# Date: September 20, 2002
# URL: https://github.com/adobe-type-tools/agl-aglfn
#
# Format: two semicolon-delimited fields:
# (1) glyph name--upper/lowercase letters and digits
# (2) Unicode scalar value--four uppercase hexadecimal digits
#
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
# END
"""
_aglfnText = """\
# -----------------------------------------------------------
# Copyright 2002-2019 Adobe (http://www.adobe.com/).
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# Neither the name of Adobe nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------
# Name: Adobe Glyph List For New Fonts
# Table version: 1.7
# Date: November 6, 2008
# URL: https://github.com/adobe-type-tools/agl-aglfn
#
# Description:
#
# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph
# names that are recommended for new fonts, which are compatible with
# the AGL (Adobe Glyph List) Specification, and which should be used
# as described in Section 6 of that document. AGLFN comprises the set
# of glyph names from AGL that map via the AGL Specification rules to
# the semantically correct UV (Unicode Value). For example, "Asmall"
# is omitted because AGL maps this glyph name to the PUA (Private Use
# Area) value U+F761, rather than to the UV that maps from the glyph
# name "A." Also omitted is "ffi," because AGL maps this to the
# Alphabetic Presentation Forms value U+FB03, rather than decomposing
# it into the following sequence of three UVs: U+0066, U+0066, and
# U+0069. The name "arrowvertex" has been omitted because this glyph
# now has a real UV, and AGL is now incorrect in mapping it to the PUA
# value U+F8E6. If you do not find an appropriate name for your glyph
# in this list, then please refer to Section 6 of the AGL
# Specification.
#
# Format: three semicolon-delimited fields:
# (1) Standard UV or CUS UV--four uppercase hexadecimal digits
# (2) Glyph name--upper/lowercase letters and digits
# (3) Character names: Unicode character names for standard UVs, and
# descriptive names for CUS UVs--uppercase letters, hyphen, and
# space
#
# The records are sorted by glyph name in increasing ASCII order,
# entries with the same glyph name are sorted in decreasing priority
# order, the UVs and Unicode character names are provided for
# convenience, lines starting with "#" are comments, and blank lines
# should be ignored.
#
# Revision History:
#
# 1.7 [6 November 2008]
# - Reverted to the original 1.4 and earlier mappings for Delta,
# Omega, and mu.
# - Removed mappings for "afii" names. These should now be assigned
# "uni" names.
# - Removed mappings for "commaaccent" names. These should now be
# assigned "uni" names.
#
# 1.6 [30 January 2006]
# - Completed work intended in 1.5.
#
# 1.5 [23 November 2005]
# - Removed duplicated block at end of file.
# - Changed mappings:
# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA
# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA
# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU
# - Corrected statement above about why "ffi" is omitted.
#
# 1.4 [24 September 2003]
# - Changed version to 1.4, to avoid confusion with the AGL 1.3.
# - Fixed spelling errors in the header.
# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode
# value in some fonts.
#
# 1.1 [17 April 2003]
# - Renamed [Tt]cedilla back to [Tt]commaaccent.
#
# 1.0 [31 January 2003]
# - Original version.
# - Derived from the AGLv1.2 by:
# removing the PUA area codes;
# removing duplicate Unicode mappings; and
# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla"
#
0041;A;LATIN CAPITAL LETTER A
00C6;AE;LATIN CAPITAL LETTER AE
01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE
00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE
0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE
00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX
00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS
00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE
0391;Alpha;GREEK CAPITAL LETTER ALPHA
0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS
0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON
0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK
00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE
01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE
0042;B;LATIN CAPITAL LETTER B
0392;Beta;GREEK CAPITAL LETTER BETA
0043;C;LATIN CAPITAL LETTER C
0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE
010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON
00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA
0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX
010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE
03A7;Chi;GREEK CAPITAL LETTER CHI
0044;D;LATIN CAPITAL LETTER D
010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON
0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE
2206;Delta;INCREMENT
0045;E;LATIN CAPITAL LETTER E
00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE
0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE
011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON
00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX
00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS
0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE
00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE
0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON
014A;Eng;LATIN CAPITAL LETTER ENG
0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK
0395;Epsilon;GREEK CAPITAL LETTER EPSILON
0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS
0397;Eta;GREEK CAPITAL LETTER ETA
0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS
00D0;Eth;LATIN CAPITAL LETTER ETH
20AC;Euro;EURO SIGN
0046;F;LATIN CAPITAL LETTER F
0047;G;LATIN CAPITAL LETTER G
0393;Gamma;GREEK CAPITAL LETTER GAMMA
011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE
01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON
011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX
0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE
0048;H;LATIN CAPITAL LETTER H
25CF;H18533;BLACK CIRCLE
25AA;H18543;BLACK SMALL SQUARE
25AB;H18551;WHITE SMALL SQUARE
25A1;H22073;WHITE SQUARE
0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE
0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX
0049;I;LATIN CAPITAL LETTER I
0132;IJ;LATIN CAPITAL LIGATURE IJ
00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE
012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE
00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX
00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS
0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE
2111;Ifraktur;BLACK-LETTER CAPITAL I
00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE
012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON
012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK
0399;Iota;GREEK CAPITAL LETTER IOTA
03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS
0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE
004A;J;LATIN CAPITAL LETTER J
0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX
004B;K;LATIN CAPITAL LETTER K
039A;Kappa;GREEK CAPITAL LETTER KAPPA
004C;L;LATIN CAPITAL LETTER L
0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE
039B;Lambda;GREEK CAPITAL LETTER LAMDA
013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON
013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT
0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE
004D;M;LATIN CAPITAL LETTER M
039C;Mu;GREEK CAPITAL LETTER MU
004E;N;LATIN CAPITAL LETTER N
0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE
0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON
00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE
039D;Nu;GREEK CAPITAL LETTER NU
004F;O;LATIN CAPITAL LETTER O
0152;OE;LATIN CAPITAL LIGATURE OE
00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE
014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE
00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX
00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS
00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE
01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN
0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON
2126;Omega;OHM SIGN
038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS
039F;Omicron;GREEK CAPITAL LETTER OMICRON
038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS
00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE
01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE
0050;P;LATIN CAPITAL LETTER P
03A6;Phi;GREEK CAPITAL LETTER PHI
03A0;Pi;GREEK CAPITAL LETTER PI
03A8;Psi;GREEK CAPITAL LETTER PSI
0051;Q;LATIN CAPITAL LETTER Q
0052;R;LATIN CAPITAL LETTER R
0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE
0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON
211C;Rfraktur;BLACK-LETTER CAPITAL R
03A1;Rho;GREEK CAPITAL LETTER RHO
0053;S;LATIN CAPITAL LETTER S
250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT
2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT
2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT
2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT
253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL
251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT
2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT
2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL
2502;SF110000;BOX DRAWINGS LIGHT VERTICAL
2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT
2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL
2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT
255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT
255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT
2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT
2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL
2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL
256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE
0160;Scaron;LATIN CAPITAL LETTER S WITH CARON
015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA
015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX
03A3;Sigma;GREEK CAPITAL LETTER SIGMA
0054;T;LATIN CAPITAL LETTER T
03A4;Tau;GREEK CAPITAL LETTER TAU
0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE
0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON
0398;Theta;GREEK CAPITAL LETTER THETA
00DE;Thorn;LATIN CAPITAL LETTER THORN
0055;U;LATIN CAPITAL LETTER U
00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE
016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE
00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX
00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS
00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE
01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN
0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON
0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK
03A5;Upsilon;GREEK CAPITAL LETTER UPSILON
03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL
03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS
016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE
0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE
0056;V;LATIN CAPITAL LETTER V
0057;W;LATIN CAPITAL LETTER W
1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE
0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX
1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS
1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE
0058;X;LATIN CAPITAL LETTER X
039E;Xi;GREEK CAPITAL LETTER XI
0059;Y;LATIN CAPITAL LETTER Y
00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE
0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS
1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE
005A;Z;LATIN CAPITAL LETTER Z
0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE
017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON
017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE
0396;Zeta;GREEK CAPITAL LETTER ZETA
0061;a;LATIN SMALL LETTER A
00E1;aacute;LATIN SMALL LETTER A WITH ACUTE
0103;abreve;LATIN SMALL LETTER A WITH BREVE
00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX
00B4;acute;ACUTE ACCENT
0301;acutecomb;COMBINING ACUTE ACCENT
00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS
00E6;ae;LATIN SMALL LETTER AE
01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE
00E0;agrave;LATIN SMALL LETTER A WITH GRAVE
2135;aleph;ALEF SYMBOL
03B1;alpha;GREEK SMALL LETTER ALPHA
03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS
0101;amacron;LATIN SMALL LETTER A WITH MACRON
0026;ampersand;AMPERSAND
2220;angle;ANGLE
2329;angleleft;LEFT-POINTING ANGLE BRACKET
232A;angleright;RIGHT-POINTING ANGLE BRACKET
0387;anoteleia;GREEK ANO TELEIA
0105;aogonek;LATIN SMALL LETTER A WITH OGONEK
2248;approxequal;ALMOST EQUAL TO
00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE
01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
2194;arrowboth;LEFT RIGHT ARROW
21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW
21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW
21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW
21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW
21D1;arrowdblup;UPWARDS DOUBLE ARROW
2193;arrowdown;DOWNWARDS ARROW
2190;arrowleft;LEFTWARDS ARROW
2192;arrowright;RIGHTWARDS ARROW
2191;arrowup;UPWARDS ARROW
2195;arrowupdn;UP DOWN ARROW
21A8;arrowupdnbse;UP DOWN ARROW WITH BASE
005E;asciicircum;CIRCUMFLEX ACCENT
007E;asciitilde;TILDE
002A;asterisk;ASTERISK
2217;asteriskmath;ASTERISK OPERATOR
0040;at;COMMERCIAL AT
00E3;atilde;LATIN SMALL LETTER A WITH TILDE
0062;b;LATIN SMALL LETTER B
005C;backslash;REVERSE SOLIDUS
007C;bar;VERTICAL LINE
03B2;beta;GREEK SMALL LETTER BETA
2588;block;FULL BLOCK
007B;braceleft;LEFT CURLY BRACKET
007D;braceright;RIGHT CURLY BRACKET
005B;bracketleft;LEFT SQUARE BRACKET
005D;bracketright;RIGHT SQUARE BRACKET
02D8;breve;BREVE
00A6;brokenbar;BROKEN BAR
2022;bullet;BULLET
0063;c;LATIN SMALL LETTER C
0107;cacute;LATIN SMALL LETTER C WITH ACUTE
02C7;caron;CARON
21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS
010D;ccaron;LATIN SMALL LETTER C WITH CARON
00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA
0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX
010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE
00B8;cedilla;CEDILLA
00A2;cent;CENT SIGN
03C7;chi;GREEK SMALL LETTER CHI
25CB;circle;WHITE CIRCLE
2297;circlemultiply;CIRCLED TIMES
2295;circleplus;CIRCLED PLUS
02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT
2663;club;BLACK CLUB SUIT
003A;colon;COLON
20A1;colonmonetary;COLON SIGN
002C;comma;COMMA
2245;congruent;APPROXIMATELY EQUAL TO
00A9;copyright;COPYRIGHT SIGN
00A4;currency;CURRENCY SIGN
0064;d;LATIN SMALL LETTER D
2020;dagger;DAGGER
2021;daggerdbl;DOUBLE DAGGER
010F;dcaron;LATIN SMALL LETTER D WITH CARON
0111;dcroat;LATIN SMALL LETTER D WITH STROKE
00B0;degree;DEGREE SIGN
03B4;delta;GREEK SMALL LETTER DELTA
2666;diamond;BLACK DIAMOND SUIT
00A8;dieresis;DIAERESIS
0385;dieresistonos;GREEK DIALYTIKA TONOS
00F7;divide;DIVISION SIGN
2593;dkshade;DARK SHADE
2584;dnblock;LOWER HALF BLOCK
0024;dollar;DOLLAR SIGN
20AB;dong;DONG SIGN
02D9;dotaccent;DOT ABOVE
0323;dotbelowcomb;COMBINING DOT BELOW
0131;dotlessi;LATIN SMALL LETTER DOTLESS I
22C5;dotmath;DOT OPERATOR
0065;e;LATIN SMALL LETTER E
00E9;eacute;LATIN SMALL LETTER E WITH ACUTE
0115;ebreve;LATIN SMALL LETTER E WITH BREVE
011B;ecaron;LATIN SMALL LETTER E WITH CARON
00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX
00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS
0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE
00E8;egrave;LATIN SMALL LETTER E WITH GRAVE
0038;eight;DIGIT EIGHT
2208;element;ELEMENT OF
2026;ellipsis;HORIZONTAL ELLIPSIS
0113;emacron;LATIN SMALL LETTER E WITH MACRON
2014;emdash;EM DASH
2205;emptyset;EMPTY SET
2013;endash;EN DASH
014B;eng;LATIN SMALL LETTER ENG
0119;eogonek;LATIN SMALL LETTER E WITH OGONEK
03B5;epsilon;GREEK SMALL LETTER EPSILON
03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS
003D;equal;EQUALS SIGN
2261;equivalence;IDENTICAL TO
212E;estimated;ESTIMATED SYMBOL
03B7;eta;GREEK SMALL LETTER ETA
03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS
00F0;eth;LATIN SMALL LETTER ETH
0021;exclam;EXCLAMATION MARK
203C;exclamdbl;DOUBLE EXCLAMATION MARK
00A1;exclamdown;INVERTED EXCLAMATION MARK
2203;existential;THERE EXISTS
0066;f;LATIN SMALL LETTER F
2640;female;FEMALE SIGN
2012;figuredash;FIGURE DASH
25A0;filledbox;BLACK SQUARE
25AC;filledrect;BLACK RECTANGLE
0035;five;DIGIT FIVE
215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS
0192;florin;LATIN SMALL LETTER F WITH HOOK
0034;four;DIGIT FOUR
2044;fraction;FRACTION SLASH
20A3;franc;FRENCH FRANC SIGN
0067;g;LATIN SMALL LETTER G
03B3;gamma;GREEK SMALL LETTER GAMMA
011F;gbreve;LATIN SMALL LETTER G WITH BREVE
01E7;gcaron;LATIN SMALL LETTER G WITH CARON
011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX
0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE
00DF;germandbls;LATIN SMALL LETTER SHARP S
2207;gradient;NABLA
0060;grave;GRAVE ACCENT
0300;gravecomb;COMBINING GRAVE ACCENT
003E;greater;GREATER-THAN SIGN
2265;greaterequal;GREATER-THAN OR EQUAL TO
00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK
203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0068;h;LATIN SMALL LETTER H
0127;hbar;LATIN SMALL LETTER H WITH STROKE
0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX
2665;heart;BLACK HEART SUIT
0309;hookabovecomb;COMBINING HOOK ABOVE
2302;house;HOUSE
02DD;hungarumlaut;DOUBLE ACUTE ACCENT
002D;hyphen;HYPHEN-MINUS
0069;i;LATIN SMALL LETTER I
00ED;iacute;LATIN SMALL LETTER I WITH ACUTE
012D;ibreve;LATIN SMALL LETTER I WITH BREVE
00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX
00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS
00EC;igrave;LATIN SMALL LETTER I WITH GRAVE
0133;ij;LATIN SMALL LIGATURE IJ
012B;imacron;LATIN SMALL LETTER I WITH MACRON
221E;infinity;INFINITY
222B;integral;INTEGRAL
2321;integralbt;BOTTOM HALF INTEGRAL
2320;integraltp;TOP HALF INTEGRAL
2229;intersection;INTERSECTION
25D8;invbullet;INVERSE BULLET
25D9;invcircle;INVERSE WHITE CIRCLE
263B;invsmileface;BLACK SMILING FACE
012F;iogonek;LATIN SMALL LETTER I WITH OGONEK
03B9;iota;GREEK SMALL LETTER IOTA
03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA
0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS
0129;itilde;LATIN SMALL LETTER I WITH TILDE
006A;j;LATIN SMALL LETTER J
0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX
006B;k;LATIN SMALL LETTER K
03BA;kappa;GREEK SMALL LETTER KAPPA
0138;kgreenlandic;LATIN SMALL LETTER KRA
006C;l;LATIN SMALL LETTER L
013A;lacute;LATIN SMALL LETTER L WITH ACUTE
03BB;lambda;GREEK SMALL LETTER LAMDA
013E;lcaron;LATIN SMALL LETTER L WITH CARON
0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT
003C;less;LESS-THAN SIGN
2264;lessequal;LESS-THAN OR EQUAL TO
258C;lfblock;LEFT HALF BLOCK
20A4;lira;LIRA SIGN
2227;logicaland;LOGICAL AND
00AC;logicalnot;NOT SIGN
2228;logicalor;LOGICAL OR
017F;longs;LATIN SMALL LETTER LONG S
25CA;lozenge;LOZENGE
0142;lslash;LATIN SMALL LETTER L WITH STROKE
2591;ltshade;LIGHT SHADE
006D;m;LATIN SMALL LETTER M
00AF;macron;MACRON
2642;male;MALE SIGN
2212;minus;MINUS SIGN
2032;minute;PRIME
00B5;mu;MICRO SIGN
00D7;multiply;MULTIPLICATION SIGN
266A;musicalnote;EIGHTH NOTE
266B;musicalnotedbl;BEAMED EIGHTH NOTES
006E;n;LATIN SMALL LETTER N
0144;nacute;LATIN SMALL LETTER N WITH ACUTE
0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
0148;ncaron;LATIN SMALL LETTER N WITH CARON
0039;nine;DIGIT NINE
2209;notelement;NOT AN ELEMENT OF
2260;notequal;NOT EQUAL TO
2284;notsubset;NOT A SUBSET OF
00F1;ntilde;LATIN SMALL LETTER N WITH TILDE
03BD;nu;GREEK SMALL LETTER NU
0023;numbersign;NUMBER SIGN
006F;o;LATIN SMALL LETTER O
00F3;oacute;LATIN SMALL LETTER O WITH ACUTE
014F;obreve;LATIN SMALL LETTER O WITH BREVE
00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX
00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS
0153;oe;LATIN SMALL LIGATURE OE
02DB;ogonek;OGONEK
00F2;ograve;LATIN SMALL LETTER O WITH GRAVE
01A1;ohorn;LATIN SMALL LETTER O WITH HORN
0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE
014D;omacron;LATIN SMALL LETTER O WITH MACRON
03C9;omega;GREEK SMALL LETTER OMEGA
03D6;omega1;GREEK PI SYMBOL
03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS
03BF;omicron;GREEK SMALL LETTER OMICRON
03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS
0031;one;DIGIT ONE
2024;onedotenleader;ONE DOT LEADER
215B;oneeighth;VULGAR FRACTION ONE EIGHTH
00BD;onehalf;VULGAR FRACTION ONE HALF
00BC;onequarter;VULGAR FRACTION ONE QUARTER
2153;onethird;VULGAR FRACTION ONE THIRD
25E6;openbullet;WHITE BULLET
00AA;ordfeminine;FEMININE ORDINAL INDICATOR
00BA;ordmasculine;MASCULINE ORDINAL INDICATOR
221F;orthogonal;RIGHT ANGLE
00F8;oslash;LATIN SMALL LETTER O WITH STROKE
01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE
00F5;otilde;LATIN SMALL LETTER O WITH TILDE
0070;p;LATIN SMALL LETTER P
00B6;paragraph;PILCROW SIGN
0028;parenleft;LEFT PARENTHESIS
0029;parenright;RIGHT PARENTHESIS
2202;partialdiff;PARTIAL DIFFERENTIAL
0025;percent;PERCENT SIGN
002E;period;FULL STOP
00B7;periodcentered;MIDDLE DOT
22A5;perpendicular;UP TACK
2030;perthousand;PER MILLE SIGN
20A7;peseta;PESETA SIGN
03C6;phi;GREEK SMALL LETTER PHI
03D5;phi1;GREEK PHI SYMBOL
03C0;pi;GREEK SMALL LETTER PI
002B;plus;PLUS SIGN
00B1;plusminus;PLUS-MINUS SIGN
211E;prescription;PRESCRIPTION TAKE
220F;product;N-ARY PRODUCT
2282;propersubset;SUBSET OF
2283;propersuperset;SUPERSET OF
221D;proportional;PROPORTIONAL TO
03C8;psi;GREEK SMALL LETTER PSI
0071;q;LATIN SMALL LETTER Q
003F;question;QUESTION MARK
00BF;questiondown;INVERTED QUESTION MARK
0022;quotedbl;QUOTATION MARK
201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK
201C;quotedblleft;LEFT DOUBLE QUOTATION MARK
201D;quotedblright;RIGHT DOUBLE QUOTATION MARK
2018;quoteleft;LEFT SINGLE QUOTATION MARK
201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK
2019;quoteright;RIGHT SINGLE QUOTATION MARK
201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK
0027;quotesingle;APOSTROPHE
0072;r;LATIN SMALL LETTER R
0155;racute;LATIN SMALL LETTER R WITH ACUTE
221A;radical;SQUARE ROOT
0159;rcaron;LATIN SMALL LETTER R WITH CARON
2286;reflexsubset;SUBSET OF OR EQUAL TO
2287;reflexsuperset;SUPERSET OF OR EQUAL TO
00AE;registered;REGISTERED SIGN
2310;revlogicalnot;REVERSED NOT SIGN
03C1;rho;GREEK SMALL LETTER RHO
02DA;ring;RING ABOVE
2590;rtblock;RIGHT HALF BLOCK
0073;s;LATIN SMALL LETTER S
015B;sacute;LATIN SMALL LETTER S WITH ACUTE
0161;scaron;LATIN SMALL LETTER S WITH CARON
015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA
015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX
2033;second;DOUBLE PRIME
00A7;section;SECTION SIGN
003B;semicolon;SEMICOLON
0037;seven;DIGIT SEVEN
215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS
2592;shade;MEDIUM SHADE
03C3;sigma;GREEK SMALL LETTER SIGMA
03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA
223C;similar;TILDE OPERATOR
0036;six;DIGIT SIX
002F;slash;SOLIDUS
263A;smileface;WHITE SMILING FACE
0020;space;SPACE
2660;spade;BLACK SPADE SUIT
00A3;sterling;POUND SIGN
220B;suchthat;CONTAINS AS MEMBER
2211;summation;N-ARY SUMMATION
263C;sun;WHITE SUN WITH RAYS
0074;t;LATIN SMALL LETTER T
03C4;tau;GREEK SMALL LETTER TAU
0167;tbar;LATIN SMALL LETTER T WITH STROKE
0165;tcaron;LATIN SMALL LETTER T WITH CARON
2234;therefore;THEREFORE
03B8;theta;GREEK SMALL LETTER THETA
03D1;theta1;GREEK THETA SYMBOL
00FE;thorn;LATIN SMALL LETTER THORN
0033;three;DIGIT THREE
215C;threeeighths;VULGAR FRACTION THREE EIGHTHS
00BE;threequarters;VULGAR FRACTION THREE QUARTERS
02DC;tilde;SMALL TILDE
0303;tildecomb;COMBINING TILDE
0384;tonos;GREEK TONOS
2122;trademark;TRADE MARK SIGN
25BC;triagdn;BLACK DOWN-POINTING TRIANGLE
25C4;triaglf;BLACK LEFT-POINTING POINTER
25BA;triagrt;BLACK RIGHT-POINTING POINTER
25B2;triagup;BLACK UP-POINTING TRIANGLE
0032;two;DIGIT TWO
2025;twodotenleader;TWO DOT LEADER
2154;twothirds;VULGAR FRACTION TWO THIRDS
0075;u;LATIN SMALL LETTER U
00FA;uacute;LATIN SMALL LETTER U WITH ACUTE
016D;ubreve;LATIN SMALL LETTER U WITH BREVE
00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX
00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS
00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE
01B0;uhorn;LATIN SMALL LETTER U WITH HORN
0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE
016B;umacron;LATIN SMALL LETTER U WITH MACRON
005F;underscore;LOW LINE
2017;underscoredbl;DOUBLE LOW LINE
222A;union;UNION
2200;universal;FOR ALL
0173;uogonek;LATIN SMALL LETTER U WITH OGONEK
2580;upblock;UPPER HALF BLOCK
03C5;upsilon;GREEK SMALL LETTER UPSILON
03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA
03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS
016F;uring;LATIN SMALL LETTER U WITH RING ABOVE
0169;utilde;LATIN SMALL LETTER U WITH TILDE
0076;v;LATIN SMALL LETTER V
0077;w;LATIN SMALL LETTER W
1E83;wacute;LATIN SMALL LETTER W WITH ACUTE
0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX
1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS
2118;weierstrass;SCRIPT CAPITAL P
1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE
0078;x;LATIN SMALL LETTER X
03BE;xi;GREEK SMALL LETTER XI
0079;y;LATIN SMALL LETTER Y
00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE
0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX
00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS
00A5;yen;YEN SIGN
1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE
007A;z;LATIN SMALL LETTER Z
017A;zacute;LATIN SMALL LETTER Z WITH ACUTE
017E;zcaron;LATIN SMALL LETTER Z WITH CARON
017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE
0030;zero;DIGIT ZERO
03B6;zeta;GREEK SMALL LETTER ZETA
# END
"""
class AGLError(Exception):
pass
LEGACY_AGL2UV = {}
AGL2UV = {}
UV2AGL = {}
def _builddicts():
import re
lines = _aglText.splitlines()
parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
for line in lines:
if not line or line[:1] == "#":
continue
m = parseAGL_RE.match(line)
if not m:
raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20]))
unicodes = m.group(2)
assert len(unicodes) % 5 == 4
unicodes = [int(unicode, 16) for unicode in unicodes.split()]
glyphName = tostr(m.group(1))
LEGACY_AGL2UV[glyphName] = unicodes
lines = _aglfnText.splitlines()
parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
for line in lines:
if not line or line[:1] == "#":
continue
m = parseAGLFN_RE.match(line)
if not m:
raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20]))
unicode = m.group(1)
assert len(unicode) == 4
unicode = int(unicode, 16)
glyphName = tostr(m.group(2))
AGL2UV[glyphName] = unicode
UV2AGL[unicode] = glyphName
_builddicts()
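# Illustrative lookup sketch: after _builddicts() has run, the module-level
# dicts can be queried directly. The example entries below come from the
# embedded data above (e.g. "0030;zero;DIGIT ZERO"); "zeta" is assumed to be
# present in the embedded glyphlist data as well.
#
#     from fontTools.agl import AGL2UV, UV2AGL, LEGACY_AGL2UV
#     AGL2UV["zero"]          # 0x0030
#     UV2AGL[0x03B6]          # 'zeta'
#     LEGACY_AGL2UV["zeta"]   # [0x03B6] -- the legacy dict maps names to lists of codepoints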
def toUnicode(glyph, isZapfDingbats=False):
"""Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'``
If ``isZapfDingbats`` is ``True``, the implementation recognizes additional
glyph names (as required by the AGL specification).
"""
# https://github.com/adobe-type-tools/agl-specification#2-the-mapping
#
# 1. Drop all the characters from the glyph name starting with
# the first occurrence of a period (U+002E; FULL STOP), if any.
glyph = glyph.split(".", 1)[0]
# 2. Split the remaining string into a sequence of components,
# using underscore (U+005F; LOW LINE) as the delimiter.
components = glyph.split("_")
# 3. Map each component to a character string according to the
# procedure below, and concatenate those strings; the result
# is the character string to which the glyph name is mapped.
result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components]
return "".join(result)
def _glyphComponentToUnicode(component, isZapfDingbats):
# If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats),
# and the component is in the ITC Zapf Dingbats Glyph List, then
# map it to the corresponding character in that list.
dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None
if dingbat:
return dingbat
# Otherwise, if the component is in AGL, then map it
# to the corresponding character in that list.
uchars = LEGACY_AGL2UV.get(component)
if uchars:
return "".join(map(chr, uchars))
# Otherwise, if the component is of the form "uni" (U+0075,
# U+006E, and U+0069) followed by a sequence of uppercase
# hexadecimal digits (0–9 and A–F, meaning U+0030 through
# U+0039 and U+0041 through U+0046), if the length of that
# sequence is a multiple of four, and if each group of four
# digits represents a value in the ranges 0000 through D7FF
# or E000 through FFFF, then interpret each as a Unicode scalar
# value and map the component to the string made of those
# scalar values. Note that the range and digit-length
# restrictions mean that the "uni" glyph name prefix can be
# used only with UVs in the Basic Multilingual Plane (BMP).
uni = _uniToUnicode(component)
if uni:
return uni
# Otherwise, if the component is of the form "u" (U+0075)
# followed by a sequence of four to six uppercase hexadecimal
# digits (0–9 and A–F, meaning U+0030 through U+0039 and
# U+0041 through U+0046), and those digits represents a value
# in the ranges 0000 through D7FF or E000 through 10FFFF, then
# interpret it as a Unicode scalar value and map the component
# to the string made of this scalar value.
uni = _uToUnicode(component)
if uni:
return uni
# Otherwise, map the component to an empty string.
return ""
# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt
_AGL_ZAPF_DINGBATS = (
" ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
"❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
"①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
"↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰"
)
def _zapfDingbatsToUnicode(glyph):
"""Helper for toUnicode()."""
if len(glyph) < 2 or glyph[0] != "a":
return None
try:
gid = int(glyph[1:])
except ValueError:
return None
if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
return None
uchar = _AGL_ZAPF_DINGBATS[gid]
return uchar if uchar != " " else None
_re_uni = re.compile("^uni([0-9A-F]+)$")
def _uniToUnicode(component):
"""Helper for toUnicode() to handle "uniABCD" components."""
match = _re_uni.match(component)
if match is None:
return None
digits = match.group(1)
if len(digits) % 4 != 0:
return None
chars = [int(digits[i : i + 4], 16) for i in range(0, len(digits), 4)]
if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
# The AGL specification explicitly excluded surrogate pairs.
return None
return "".join([chr(c) for c in chars])
_re_u = re.compile("^u([0-9A-F]{4,6})$")
def _uToUnicode(component):
"""Helper for toUnicode() to handle "u1ABCD" components."""
match = _re_u.match(component)
if match is None:
return None
digits = match.group(1)
try:
value = int(digits, 16)
except ValueError:
return None
if (value >= 0x0000 and value <= 0xD7FF) or (value >= 0xE000 and value <= 0x10FFFF):
return chr(value)
return None
# fontTools/fontBuilder.py
__all__ = ["FontBuilder"]
"""
This module is *experimental*, meaning it still may evolve and change.
The `FontBuilder` class is a convenient helper to construct working TTF or
OTF fonts from scratch.
Note that the various setup methods cannot be called in arbitrary order,
due to various interdependencies between OpenType tables. Here is an order
that works:
fb = FontBuilder(...)
fb.setupGlyphOrder(...)
fb.setupCharacterMap(...)
fb.setupGlyf(...) --or-- fb.setupCFF(...)
fb.setupHorizontalMetrics(...)
fb.setupHorizontalHeader()
fb.setupNameTable(...)
fb.setupOS2()
fb.addOpenTypeFeatures(...)
fb.setupPost()
fb.save(...)
Here is how to build a minimal TTF:
```python
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
def drawTestGlyph(pen):
pen.moveTo((100, 100))
pen.lineTo((100, 1000))
pen.qCurveTo((200, 900), (400, 900), (500, 1000))
pen.lineTo((500, 100))
pen.closePath()
fb = FontBuilder(1024, isTTF=True)
fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"])
fb.setupCharacterMap({32: "space", 65: "A", 97: "a"})
advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0}
familyName = "HelloTestFont"
styleName = "TotallyNormal"
version = "0.1"
nameStrings = dict(
familyName=dict(en=familyName, nl="HalloTestFont"),
styleName=dict(en=styleName, nl="TotaalNormaal"),
uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName,
fullName=familyName + "-" + styleName,
psName=familyName + "-" + styleName,
version="Version " + version,
)
pen = TTGlyphPen(None)
drawTestGlyph(pen)
glyph = pen.glyph()
glyphs = {".notdef": glyph, "space": glyph, "A": glyph, "a": glyph, ".null": glyph}
fb.setupGlyf(glyphs)
metrics = {}
glyphTable = fb.font["glyf"]
for gn, advanceWidth in advanceWidths.items():
metrics[gn] = (advanceWidth, glyphTable[gn].xMin)
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=-200)
fb.setupNameTable(nameStrings)
fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200)
fb.setupPost()
fb.save("test.ttf")
```
And here's how to build a minimal OTF:
```python
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.t2CharStringPen import T2CharStringPen
def drawTestGlyph(pen):
pen.moveTo((100, 100))
pen.lineTo((100, 1000))
pen.curveTo((200, 900), (400, 900), (500, 1000))
pen.lineTo((500, 100))
pen.closePath()
fb = FontBuilder(1024, isTTF=False)
fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"])
fb.setupCharacterMap({32: "space", 65: "A", 97: "a"})
advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0}
familyName = "HelloTestFont"
styleName = "TotallyNormal"
version = "0.1"
nameStrings = dict(
familyName=dict(en=familyName, nl="HalloTestFont"),
styleName=dict(en=styleName, nl="TotaalNormaal"),
uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName,
fullName=familyName + "-" + styleName,
psName=familyName + "-" + styleName,
version="Version " + version,
)
pen = T2CharStringPen(600, None)
drawTestGlyph(pen)
charString = pen.getCharString()
charStrings = {
".notdef": charString,
"space": charString,
"A": charString,
"a": charString,
".null": charString,
}
fb.setupCFF(nameStrings["psName"], {"FullName": nameStrings["psName"]}, charStrings, {})
lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()}
metrics = {}
for gn, advanceWidth in advanceWidths.items():
metrics[gn] = (advanceWidth, lsb[gn])
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=200)
fb.setupNameTable(nameStrings)
fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200)
fb.setupPost()
fb.save("test.otf")
```
"""
from .ttLib import TTFont, newTable
from .ttLib.tables._c_m_a_p import cmap_classes
from .ttLib.tables._g_l_y_f import flagCubic
from .ttLib.tables.O_S_2f_2 import Panose
from .misc.timeTools import timestampNow
import struct
from collections import OrderedDict
_headDefaults = dict(
tableVersion=1.0,
fontRevision=1.0,
checkSumAdjustment=0,
magicNumber=0x5F0F3CF5,
flags=0x0003,
unitsPerEm=1000,
created=0,
modified=0,
xMin=0,
yMin=0,
xMax=0,
yMax=0,
macStyle=0,
lowestRecPPEM=3,
fontDirectionHint=2,
indexToLocFormat=0,
glyphDataFormat=0,
)
_maxpDefaultsTTF = dict(
tableVersion=0x00010000,
numGlyphs=0,
maxPoints=0,
maxContours=0,
maxCompositePoints=0,
maxCompositeContours=0,
maxZones=2,
maxTwilightPoints=0,
maxStorage=0,
maxFunctionDefs=0,
maxInstructionDefs=0,
maxStackElements=0,
maxSizeOfInstructions=0,
maxComponentElements=0,
maxComponentDepth=0,
)
_maxpDefaultsOTF = dict(
tableVersion=0x00005000,
numGlyphs=0,
)
_postDefaults = dict(
formatType=3.0,
italicAngle=0,
underlinePosition=0,
underlineThickness=0,
isFixedPitch=0,
minMemType42=0,
maxMemType42=0,
minMemType1=0,
maxMemType1=0,
)
_hheaDefaults = dict(
tableVersion=0x00010000,
ascent=0,
descent=0,
lineGap=0,
advanceWidthMax=0,
minLeftSideBearing=0,
minRightSideBearing=0,
xMaxExtent=0,
caretSlopeRise=1,
caretSlopeRun=0,
caretOffset=0,
reserved0=0,
reserved1=0,
reserved2=0,
reserved3=0,
metricDataFormat=0,
numberOfHMetrics=0,
)
_vheaDefaults = dict(
tableVersion=0x00010000,
ascent=0,
descent=0,
lineGap=0,
advanceHeightMax=0,
minTopSideBearing=0,
minBottomSideBearing=0,
yMaxExtent=0,
caretSlopeRise=0,
caretSlopeRun=0,
reserved0=0,
reserved1=0,
reserved2=0,
reserved3=0,
reserved4=0,
metricDataFormat=0,
numberOfVMetrics=0,
)
_nameIDs = dict(
copyright=0,
familyName=1,
styleName=2,
uniqueFontIdentifier=3,
fullName=4,
version=5,
psName=6,
trademark=7,
manufacturer=8,
designer=9,
description=10,
vendorURL=11,
designerURL=12,
licenseDescription=13,
licenseInfoURL=14,
# reserved = 15,
typographicFamily=16,
typographicSubfamily=17,
compatibleFullName=18,
sampleText=19,
postScriptCIDFindfontName=20,
wwsFamilyName=21,
wwsSubfamilyName=22,
lightBackgroundPalette=23,
darkBackgroundPalette=24,
variationsPostScriptNamePrefix=25,
)
# to insert in setupNameTable doc string:
# print("\n".join(("%s (nameID %s)" % (k, v)) for k, v in sorted(_nameIDs.items(), key=lambda x: x[1])))
_panoseDefaults = Panose()
_OS2Defaults = dict(
version=3,
xAvgCharWidth=0,
usWeightClass=400,
usWidthClass=5,
fsType=0x0004, # default: Preview & Print embedding
ySubscriptXSize=0,
ySubscriptYSize=0,
ySubscriptXOffset=0,
ySubscriptYOffset=0,
ySuperscriptXSize=0,
ySuperscriptYSize=0,
ySuperscriptXOffset=0,
ySuperscriptYOffset=0,
yStrikeoutSize=0,
yStrikeoutPosition=0,
sFamilyClass=0,
panose=_panoseDefaults,
ulUnicodeRange1=0,
ulUnicodeRange2=0,
ulUnicodeRange3=0,
ulUnicodeRange4=0,
achVendID="????",
fsSelection=0,
usFirstCharIndex=0,
usLastCharIndex=0,
sTypoAscender=0,
sTypoDescender=0,
sTypoLineGap=0,
usWinAscent=0,
usWinDescent=0,
ulCodePageRange1=0,
ulCodePageRange2=0,
sxHeight=0,
sCapHeight=0,
usDefaultChar=0, # .notdef
usBreakChar=32, # space
usMaxContext=0,
usLowerOpticalPointSize=0,
usUpperOpticalPointSize=0,
)
class FontBuilder(object):
def __init__(self, unitsPerEm=None, font=None, isTTF=True, glyphDataFormat=0):
"""Initialize a FontBuilder instance.
If the `font` argument is not given, a new `TTFont` will be
constructed, and `unitsPerEm` must be given. If `isTTF` is True,
the font will be a glyf-based TTF; if `isTTF` is False it will be
a CFF-based OTF.
The `glyphDataFormat` argument corresponds to the `head` table field
that defines the format of the TrueType `glyf` table (default=0).
TrueType glyphs historically can only contain quadratic splines and static
components, but there's a proposal to add support for cubic Bezier curves as well
as variable composites/components at
https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1.md
You can experiment with the new features by setting `glyphDataFormat` to 1.
A ValueError is raised if `glyphDataFormat` is left at 0 but glyphs are added
that contain cubic splines or varcomposites. This is to prevent accidentally
creating fonts that are incompatible with existing TrueType implementations.
If `font` is given, it must be a `TTFont` instance and `unitsPerEm`
must _not_ be given. The `isTTF` and `glyphDataFormat` arguments will be ignored.
"""
if font is None:
self.font = TTFont(recalcTimestamp=False)
self.isTTF = isTTF
now = timestampNow()
assert unitsPerEm is not None
self.setupHead(
unitsPerEm=unitsPerEm,
created=now,
modified=now,
glyphDataFormat=glyphDataFormat,
)
self.setupMaxp()
else:
assert unitsPerEm is None
self.font = font
self.isTTF = "glyf" in font
def save(self, file):
"""Save the font. The 'file' argument can be either a pathname or a
writable file object.
"""
self.font.save(file)
def _initTableWithValues(self, tableTag, defaults, values):
table = self.font[tableTag] = newTable(tableTag)
for k, v in defaults.items():
setattr(table, k, v)
for k, v in values.items():
setattr(table, k, v)
return table
def _updateTableWithValues(self, tableTag, values):
table = self.font[tableTag]
for k, v in values.items():
setattr(table, k, v)
def setupHead(self, **values):
"""Create a new `head` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("head", _headDefaults, values)
def updateHead(self, **values):
"""Update the head table with the fields and values passed as
keyword arguments.
"""
self._updateTableWithValues("head", values)
def setupGlyphOrder(self, glyphOrder):
"""Set the glyph order for the font."""
self.font.setGlyphOrder(glyphOrder)
def setupCharacterMap(self, cmapping, uvs=None, allowFallback=False):
"""Build the `cmap` table for the font. The `cmapping` argument should
be a dict mapping unicode code points as integers to glyph names.
The `uvs` argument, when passed, must be a list of tuples, describing
Unicode Variation Sequences. These tuples have three elements:
(unicodeValue, variationSelector, glyphName)
`unicodeValue` and `variationSelector` are integer code points.
`glyphName` may be None, to indicate this is the default variation.
Text processors will then use the cmap to find the glyph name.
Each Unicode Variation Sequence should be an officially supported
sequence, but this is not policed.
"""
subTables = []
highestUnicode = max(cmapping) if cmapping else 0
if highestUnicode > 0xFFFF:
cmapping_3_1 = dict((k, v) for k, v in cmapping.items() if k < 0x10000)
subTable_3_10 = buildCmapSubTable(cmapping, 12, 3, 10)
subTables.append(subTable_3_10)
else:
cmapping_3_1 = cmapping
format = 4
subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1)
try:
subTable_3_1.compile(self.font)
except struct.error:
# format 4 overflowed, fall back to format 12
if not allowFallback:
raise ValueError(
"cmap format 4 subtable overflowed; sort glyph order by unicode to fix."
)
format = 12
subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1)
subTables.append(subTable_3_1)
subTable_0_3 = buildCmapSubTable(cmapping_3_1, format, 0, 3)
subTables.append(subTable_0_3)
if uvs is not None:
uvsDict = {}
for unicodeValue, variationSelector, glyphName in uvs:
if cmapping.get(unicodeValue) == glyphName:
# this is a default variation
glyphName = None
if variationSelector not in uvsDict:
uvsDict[variationSelector] = []
uvsDict[variationSelector].append((unicodeValue, glyphName))
uvsSubTable = buildCmapSubTable({}, 14, 0, 5)
uvsSubTable.uvsDict = uvsDict
subTables.append(uvsSubTable)
self.font["cmap"] = newTable("cmap")
self.font["cmap"].tableVersion = 0
self.font["cmap"].tables = subTables
def setupNameTable(self, nameStrings, windows=True, mac=True):
"""Create the `name` table for the font. The `nameStrings` argument must
be a dict, mapping nameIDs or descriptive names for the nameIDs to name
record values. A value is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
By default, both Windows (platformID=3) and Macintosh (platformID=1) name
records are added, unless the `windows` or `mac` argument is set to False.
The following descriptive names are available for nameIDs:
copyright (nameID 0)
familyName (nameID 1)
styleName (nameID 2)
uniqueFontIdentifier (nameID 3)
fullName (nameID 4)
version (nameID 5)
psName (nameID 6)
trademark (nameID 7)
manufacturer (nameID 8)
designer (nameID 9)
description (nameID 10)
vendorURL (nameID 11)
designerURL (nameID 12)
licenseDescription (nameID 13)
licenseInfoURL (nameID 14)
typographicFamily (nameID 16)
typographicSubfamily (nameID 17)
compatibleFullName (nameID 18)
sampleText (nameID 19)
postScriptCIDFindfontName (nameID 20)
wwsFamilyName (nameID 21)
wwsSubfamilyName (nameID 22)
lightBackgroundPalette (nameID 23)
darkBackgroundPalette (nameID 24)
variationsPostScriptNamePrefix (nameID 25)
"""
nameTable = self.font["name"] = newTable("name")
nameTable.names = []
for nameName, nameValue in nameStrings.items():
if isinstance(nameName, int):
nameID = nameName
else:
nameID = _nameIDs[nameName]
if isinstance(nameValue, str):
nameValue = dict(en=nameValue)
nameTable.addMultilingualName(
nameValue, ttFont=self.font, nameID=nameID, windows=windows, mac=mac
)
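# Illustrative sketch: name strings may be keyed by the descriptive names listed
# above or by raw nameIDs, and values may be plain strings or dicts of language
# codes (all strings here are made up).
#
#     fb.setupNameTable({
#         "familyName": {"en": "Demo Sans", "nl": "Demo Schreefloos"},
#         "styleName": "Regular",
#         7: "Demo Sans is a trademark of Example Corp",  # raw nameID 7 (trademark)
#     })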
def setupOS2(self, **values):
"""Create a new `OS/2` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("OS/2", _OS2Defaults, values)
if "xAvgCharWidth" not in values:
assert (
"hmtx" in self.font
), "the 'hmtx' table must be setup before the 'OS/2' table"
self.font["OS/2"].recalcAvgCharWidth(self.font)
if not (
"ulUnicodeRange1" in values
or "ulUnicodeRange2" in values
or "ulUnicodeRange3" in values
or "ulUnicodeRange3" in values
):
assert (
"cmap" in self.font
), "the 'cmap' table must be setup before the 'OS/2' table"
self.font["OS/2"].recalcUnicodeRanges(self.font)
def setupCFF(self, psName, fontInfo, charStringsDict, privateDict):
from .cffLib import (
CFFFontSet,
TopDictIndex,
TopDict,
CharStrings,
GlobalSubrsIndex,
PrivateDict,
)
assert not self.isTTF
self.font.sfntVersion = "OTTO"
fontSet = CFFFontSet()
fontSet.major = 1
fontSet.minor = 0
fontSet.otFont = self.font
fontSet.fontNames = [psName]
fontSet.topDictIndex = TopDictIndex()
globalSubrs = GlobalSubrsIndex()
fontSet.GlobalSubrs = globalSubrs
private = PrivateDict()
for key, value in privateDict.items():
setattr(private, key, value)
fdSelect = None
fdArray = None
topDict = TopDict()
topDict.charset = self.font.getGlyphOrder()
topDict.Private = private
topDict.GlobalSubrs = fontSet.GlobalSubrs
for key, value in fontInfo.items():
setattr(topDict, key, value)
if "FontMatrix" not in fontInfo:
scale = 1 / self.font["head"].unitsPerEm
topDict.FontMatrix = [scale, 0, 0, scale, 0, 0]
charStrings = CharStrings(
None, topDict.charset, globalSubrs, private, fdSelect, fdArray
)
for glyphName, charString in charStringsDict.items():
charString.private = private
charString.globalSubrs = globalSubrs
charStrings[glyphName] = charString
topDict.CharStrings = charStrings
fontSet.topDictIndex.append(topDict)
self.font["CFF "] = newTable("CFF ")
self.font["CFF "].cff = fontSet
def setupCFF2(self, charStringsDict, fdArrayList=None, regions=None):
from .cffLib import (
CFFFontSet,
TopDictIndex,
TopDict,
CharStrings,
GlobalSubrsIndex,
PrivateDict,
FDArrayIndex,
FontDict,
)
assert not self.isTTF
self.font.sfntVersion = "OTTO"
fontSet = CFFFontSet()
fontSet.major = 2
fontSet.minor = 0
cff2GetGlyphOrder = self.font.getGlyphOrder
fontSet.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None)
globalSubrs = GlobalSubrsIndex()
fontSet.GlobalSubrs = globalSubrs
if fdArrayList is None:
fdArrayList = [{}]
fdSelect = None
fdArray = FDArrayIndex()
fdArray.strings = None
fdArray.GlobalSubrs = globalSubrs
for privateDict in fdArrayList:
fontDict = FontDict()
fontDict.setCFF2(True)
private = PrivateDict()
for key, value in privateDict.items():
setattr(private, key, value)
fontDict.Private = private
fdArray.append(fontDict)
topDict = TopDict()
topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
topDict.FDArray = fdArray
scale = 1 / self.font["head"].unitsPerEm
topDict.FontMatrix = [scale, 0, 0, scale, 0, 0]
private = fdArray[0].Private
charStrings = CharStrings(None, None, globalSubrs, private, fdSelect, fdArray)
for glyphName, charString in charStringsDict.items():
charString.private = private
charString.globalSubrs = globalSubrs
charStrings[glyphName] = charString
topDict.CharStrings = charStrings
fontSet.topDictIndex.append(topDict)
self.font["CFF2"] = newTable("CFF2")
self.font["CFF2"].cff = fontSet
if regions:
self.setupCFF2Regions(regions)
def setupCFF2Regions(self, regions):
from .varLib.builder import buildVarRegionList, buildVarData, buildVarStore
from .cffLib import VarStoreData
assert "fvar" in self.font, "fvar must to be set up first"
assert "CFF2" in self.font, "CFF2 must to be set up first"
axisTags = [a.axisTag for a in self.font["fvar"].axes]
varRegionList = buildVarRegionList(regions, axisTags)
varData = buildVarData(list(range(len(regions))), None, optimize=False)
varStore = buildVarStore(varRegionList, [varData])
vstore = VarStoreData(otVarStore=varStore)
topDict = self.font["CFF2"].cff.topDictIndex[0]
topDict.VarStore = vstore
for fontDict in topDict.FDArray:
fontDict.Private.vstore = vstore
def setupGlyf(self, glyphs, calcGlyphBounds=True, validateGlyphFormat=True):
"""Create the `glyf` table from a dict, that maps glyph names
to `fontTools.ttLib.tables._g_l_y_f.Glyph` objects, for example
as made by `fontTools.pens.ttGlyphPen.TTGlyphPen`.
If `calcGlyphBounds` is True, the bounds of all glyphs will be
calculated. Only pass False if your glyph objects already have
their bounding box values set.
If `validateGlyphFormat` is True, raise ValueError if any of the glyphs contains
cubic curves or is a variable composite but head.glyphDataFormat=0.
Set it to False to skip the check if you know in advance all the glyphs are
compatible with the specified glyphDataFormat.
"""
assert self.isTTF
if validateGlyphFormat and self.font["head"].glyphDataFormat == 0:
for name, g in glyphs.items():
if g.isVarComposite():
raise ValueError(
f"Glyph {name!r} is a variable composite, but glyphDataFormat=0"
)
elif g.numberOfContours > 0 and any(f & flagCubic for f in g.flags):
raise ValueError(
f"Glyph {name!r} has cubic Bezier outlines, but glyphDataFormat=0; "
"either convert to quadratics with cu2qu or set glyphDataFormat=1."
)
self.font["loca"] = newTable("loca")
self.font["glyf"] = newTable("glyf")
self.font["glyf"].glyphs = glyphs
if hasattr(self.font, "glyphOrder"):
self.font["glyf"].glyphOrder = self.font.glyphOrder
if calcGlyphBounds:
self.calcGlyphBounds()
def setupFvar(self, axes, instances):
"""Adds an font variations table to the font.
Args:
axes (list): See below.
instances (list): See below.
``axes`` should be a list of axes, with each axis either supplied as
a py:class:`.designspaceLib.AxisDescriptor` object, or a tuple in the
format ``(tag, minValue, defaultValue, maxValue, name)``.
The ``name`` is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
``instances`` should be a list of instances, with each instance either
supplied as a py:class:`.designspaceLib.InstanceDescriptor` object, or a
dict with keys ``location`` (mapping of axis tags to float values),
``stylename`` and (optionally) ``postscriptfontname``.
The ``stylename`` is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
"""
addFvar(self.font, axes, instances)
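# Illustrative sketch of the tuple/dict forms described in the docstring above;
# note that addFvar() adds name records, so setupNameTable() must have been
# called first. Axis and instance values are made up.
#
#     fb.setupFvar(
#         axes=[("wght", 100, 400, 900, "Weight")],
#         instances=[
#             {"location": {"wght": 400}, "stylename": "Regular"},
#             {"location": {"wght": 900}, "stylename": "Black",
#              "postscriptfontname": "DemoSans-Black"},
#         ],
#     )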
def setupAvar(self, axes, mappings=None):
"""Adds an axis variations table to the font.
Args:
axes (list): A list of py:class:`.designspaceLib.AxisDescriptor` objects.
"""
from .varLib import _add_avar
if "fvar" not in self.font:
raise KeyError("'fvar' table is missing; can't add 'avar'.")
axisTags = [axis.axisTag for axis in self.font["fvar"].axes]
axes = OrderedDict(enumerate(axes)) # Only values are used
_add_avar(self.font, axes, mappings, axisTags)
def setupGvar(self, variations):
gvar = self.font["gvar"] = newTable("gvar")
gvar.version = 1
gvar.reserved = 0
gvar.variations = variations
def calcGlyphBounds(self):
"""Calculate the bounding boxes of all glyphs in the `glyf` table.
This is usually not called explicitly by client code.
"""
glyphTable = self.font["glyf"]
for glyph in glyphTable.glyphs.values():
glyph.recalcBounds(glyphTable)
def setupHorizontalMetrics(self, metrics):
"""Create a new `hmtx` table, for horizontal metrics.
The `metrics` argument must be a dict, mapping glyph names to
`(width, leftSidebearing)` tuples.
"""
self.setupMetrics("hmtx", metrics)
def setupVerticalMetrics(self, metrics):
"""Create a new `vmtx` table, for horizontal metrics.
The `metrics` argument must be a dict, mapping glyph names to
`(height, topSidebearing)` tuples.
"""
self.setupMetrics("vmtx", metrics)
def setupMetrics(self, tableTag, metrics):
"""See `setupHorizontalMetrics()` and `setupVerticalMetrics()`."""
assert tableTag in ("hmtx", "vmtx")
mtxTable = self.font[tableTag] = newTable(tableTag)
roundedMetrics = {}
for gn in metrics:
w, lsb = metrics[gn]
roundedMetrics[gn] = int(round(w)), int(round(lsb))
mtxTable.metrics = roundedMetrics
def setupHorizontalHeader(self, **values):
"""Create a new `hhea` table initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("hhea", _hheaDefaults, values)
def setupVerticalHeader(self, **values):
"""Create a new `vhea` table initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("vhea", _vheaDefaults, values)
def setupVerticalOrigins(self, verticalOrigins, defaultVerticalOrigin=None):
"""Create a new `VORG` table. The `verticalOrigins` argument must be
a dict, mapping glyph names to vertical origin values.
The `defaultVerticalOrigin` argument should be the most common vertical
origin value. If omitted, this value will be derived from the actual
values in the `verticalOrigins` argument.
"""
if defaultVerticalOrigin is None:
# find the most frequent vorg value
bag = {}
for gn in verticalOrigins:
vorg = verticalOrigins[gn]
if vorg not in bag:
bag[vorg] = 1
else:
bag[vorg] += 1
defaultVerticalOrigin = sorted(
bag, key=lambda vorg: bag[vorg], reverse=True
)[0]
self._initTableWithValues(
"VORG",
{},
dict(VOriginRecords={}, defaultVertOriginY=defaultVerticalOrigin),
)
vorgTable = self.font["VORG"]
vorgTable.majorVersion = 1
vorgTable.minorVersion = 0
for gn in verticalOrigins:
vorgTable[gn] = verticalOrigins[gn]
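# Illustrative sketch (hypothetical glyph names and values): map each glyph to
# its vertical origin; the default may be given explicitly or derived from the
# most frequent value.
#
#     fb.setupVerticalOrigins({"A": 880, "a": 880, "g": 860}, defaultVerticalOrigin=880)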
def setupPost(self, keepGlyphNames=True, **values):
"""Create a new `post` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
isCFF2 = "CFF2" in self.font
postTable = self._initTableWithValues("post", _postDefaults, values)
if (self.isTTF or isCFF2) and keepGlyphNames:
postTable.formatType = 2.0
postTable.extraNames = []
postTable.mapping = {}
else:
postTable.formatType = 3.0
def setupMaxp(self):
"""Create a new `maxp` table. This is called implicitly by FontBuilder
itself and is usually not called by client code.
"""
if self.isTTF:
defaults = _maxpDefaultsTTF
else:
defaults = _maxpDefaultsOTF
self._initTableWithValues("maxp", defaults, {})
def setupDummyDSIG(self):
"""This adds an empty DSIG table to the font to make some MS applications
happy. This does not properly sign the font.
"""
values = dict(
ulVersion=1,
usFlag=0,
usNumSigs=0,
signatureRecords=[],
)
self._initTableWithValues("DSIG", {}, values)
def addOpenTypeFeatures(self, features, filename=None, tables=None, debug=False):
"""Add OpenType features to the font from a string containing
Feature File syntax.
The `filename` argument is used in error messages and to determine
where to look for "include" files.
The optional `tables` argument can be a list of OTL table tags to
build, allowing the caller to only build selected OTL tables. See
`fontTools.feaLib` for details.
The optional `debug` argument controls whether to add source debugging
information to the font in the `Debg` table.
"""
from .feaLib.builder import addOpenTypeFeaturesFromString
addOpenTypeFeaturesFromString(
self.font, features, filename=filename, tables=tables, debug=debug
)
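# Illustrative sketch: a tiny Feature File snippet compiled into the font. The
# glyph names are hypothetical and must be present in the font's glyph order.
#
#     fb.addOpenTypeFeatures("""
#         feature kern {
#             pos A a -40;
#         } kern;
#     """)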
def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"):
"""Add conditional substitutions to a Variable Font.
See `fontTools.varLib.featureVars.addFeatureVariations`.
"""
from .varLib import featureVars
if "fvar" not in self.font:
raise KeyError("'fvar' table is missing; can't add FeatureVariations.")
featureVars.addFeatureVariations(
self.font, conditionalSubstitutions, featureTag=featureTag
)
def setupCOLR(
self,
colorLayers,
version=None,
varStore=None,
varIndexMap=None,
clipBoxes=None,
allowLayerReuse=True,
):
"""Build new COLR table using color layers dictionary.
Cf. `fontTools.colorLib.builder.buildCOLR`.
"""
from fontTools.colorLib.builder import buildCOLR
glyphMap = self.font.getReverseGlyphMap()
self.font["COLR"] = buildCOLR(
colorLayers,
version=version,
glyphMap=glyphMap,
varStore=varStore,
varIndexMap=varIndexMap,
clipBoxes=clipBoxes,
allowLayerReuse=allowLayerReuse,
)
def setupCPAL(
self,
palettes,
paletteTypes=None,
paletteLabels=None,
paletteEntryLabels=None,
):
"""Build new CPAL table using list of palettes.
Optionally build CPAL v1 table using paletteTypes, paletteLabels and
paletteEntryLabels.
Cf. `fontTools.colorLib.builder.buildCPAL`.
"""
from fontTools.colorLib.builder import buildCPAL
self.font["CPAL"] = buildCPAL(
palettes,
paletteTypes=paletteTypes,
paletteLabels=paletteLabels,
paletteEntryLabels=paletteEntryLabels,
nameTable=self.font.get("name"),
)
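# Illustrative sketch of a minimal COLRv0 + CPAL setup, assuming the layer
# glyphs exist in the font: each COLRv0 layer is a (layerGlyphName, paletteIndex)
# pair, and each CPAL palette is a list of (R, G, B, A) tuples in the 0..1 range.
#
#     fb.setupCOLR({
#         "A": [("A.layer0", 0), ("A.layer1", 1)],
#     })
#     fb.setupCPAL([
#         [(0.0, 0.0, 0.0, 1.0), (1.0, 0.0, 0.0, 1.0)],  # palette 0: black, red
#     ])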
def setupStat(self, axes, locations=None, elidedFallbackName=2):
"""Build a new 'STAT' table.
See `fontTools.otlLib.builder.buildStatTable` for details about
the arguments.
"""
from .otlLib.builder import buildStatTable
buildStatTable(self.font, axes, locations, elidedFallbackName)
def buildCmapSubTable(cmapping, format, platformID, platEncID):
subTable = cmap_classes[format](format)
subTable.cmap = cmapping
subTable.platformID = platformID
subTable.platEncID = platEncID
subTable.language = 0
return subTable
def addFvar(font, axes, instances):
from .ttLib.tables._f_v_a_r import Axis, NamedInstance
assert axes
fvar = newTable("fvar")
nameTable = font["name"]
for axis_def in axes:
axis = Axis()
if isinstance(axis_def, tuple):
(
axis.axisTag,
axis.minValue,
axis.defaultValue,
axis.maxValue,
name,
) = axis_def
else:
(axis.axisTag, axis.minValue, axis.defaultValue, axis.maxValue, name) = (
axis_def.tag,
axis_def.minimum,
axis_def.default,
axis_def.maximum,
axis_def.name,
)
if axis_def.hidden:
axis.flags = 0x0001 # HIDDEN_AXIS
if isinstance(name, str):
name = dict(en=name)
axis.axisNameID = nameTable.addMultilingualName(name, ttFont=font)
fvar.axes.append(axis)
for instance in instances:
if isinstance(instance, dict):
coordinates = instance["location"]
name = instance["stylename"]
psname = instance.get("postscriptfontname")
else:
coordinates = instance.location
name = instance.localisedStyleName or instance.styleName
psname = instance.postScriptFontName
if isinstance(name, str):
name = dict(en=name)
inst = NamedInstance()
inst.subfamilyNameID = nameTable.addMultilingualName(name, ttFont=font)
if psname is not None:
inst.postscriptNameID = nameTable.addName(psname)
inst.coordinates = coordinates
fvar.instances.append(inst)
font["fvar"] = fvar
# fontTools/help.py
import pkgutil
import sys
import fontTools
import importlib
import os
from pathlib import Path
def main():
"""Show this help"""
path = fontTools.__path__
descriptions = {}
for pkg in sorted(
mod.name
for mod in pkgutil.walk_packages([fontTools.__path__[0]], prefix="fontTools.")
):
try:
imports = __import__(pkg, globals(), locals(), ["main"])
except ImportError as e:
continue
try:
description = imports.main.__doc__
if description:
pkg = pkg.replace("fontTools.", "").replace(".__main__", "")
# show the docstring's first line only
descriptions[pkg] = description.splitlines()[0]
except AttributeError as e:
pass
for pkg, description in descriptions.items():
print("fonttools %-25s %s" % (pkg, description), file=sys.stderr)
if __name__ == "__main__":
print("fonttools v%s\n" % fontTools.__version__, file=sys.stderr)
main()
# fontTools/tfmLib.py
"""Module for reading TFM (TeX Font Metrics) files.
The TFM format is described in the TFtoPL WEB source code, whose typeset form
can be found on `CTAN <http://mirrors.ctan.org/info/knuth-pdf/texware/tftopl.pdf>`_.
>>> from fontTools.tfmLib import TFM
>>> tfm = TFM("Tests/tfmLib/data/cmr10.tfm")
>>>
>>> # Accessing an attribute gets you metadata.
>>> tfm.checksum
1274110073
>>> tfm.designsize
10.0
>>> tfm.codingscheme
'TeX text'
>>> tfm.family
'CMR'
>>> tfm.seven_bit_safe_flag
False
>>> tfm.face
234
>>> tfm.extraheader
{}
>>> tfm.fontdimens
{'SLANT': 0.0, 'SPACE': 0.33333396911621094, 'STRETCH': 0.16666698455810547, 'SHRINK': 0.11111164093017578, 'XHEIGHT': 0.4305553436279297, 'QUAD': 1.0000028610229492, 'EXTRASPACE': 0.11111164093017578}
>>> # Accessing a character gets you its metrics.
>>> # “width” is always available, other metrics are available only when
>>> # applicable. All values are relative to “designsize”.
>>> tfm.chars[ord("g")]
{'width': 0.5000019073486328, 'height': 0.4305553436279297, 'depth': 0.1944446563720703, 'italic': 0.013888359069824219}
>>> # Kerning and ligature can be accessed as well.
>>> tfm.kerning[ord("c")]
{104: -0.02777862548828125, 107: -0.02777862548828125}
>>> tfm.ligatures[ord("f")]
{105: ('LIG', 12), 102: ('LIG', 11), 108: ('LIG', 13)}
"""
from types import SimpleNamespace
from fontTools.misc.sstruct import calcsize, unpack, unpack2
SIZES_FORMAT = """
>
lf: h # length of the entire file, in words
lh: h # length of the header data, in words
bc: h # smallest character code in the font
ec: h # largest character code in the font
nw: h # number of words in the width table
nh: h # number of words in the height table
nd: h # number of words in the depth table
ni: h # number of words in the italic correction table
nl: h # number of words in the ligature/kern table
nk: h # number of words in the kern table
ne: h # number of words in the extensible character table
np: h # number of font parameter words
"""
SIZES_SIZE = calcsize(SIZES_FORMAT)
FIXED_FORMAT = "12.20F"
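# "12.20F" is an sstruct fixed-point format: a 32-bit word with 12 integer bits
# and 20 fraction bits (TFM's fix_word), i.e. the decoded value is the raw
# signed word divided by 2**20.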
HEADER_FORMAT1 = f"""
>
checksum: L
designsize: {FIXED_FORMAT}
"""
HEADER_FORMAT2 = f"""
{HEADER_FORMAT1}
codingscheme: 40p
"""
HEADER_FORMAT3 = f"""
{HEADER_FORMAT2}
family: 20p
"""
HEADER_FORMAT4 = f"""
{HEADER_FORMAT3}
seven_bit_safe_flag: ?
ignored: x
ignored: x
face: B
"""
HEADER_SIZE1 = calcsize(HEADER_FORMAT1)
HEADER_SIZE2 = calcsize(HEADER_FORMAT2)
HEADER_SIZE3 = calcsize(HEADER_FORMAT3)
HEADER_SIZE4 = calcsize(HEADER_FORMAT4)
LIG_KERN_COMMAND = """
>
skip_byte: B
next_char: B
op_byte: B
remainder: B
"""
BASE_PARAMS = [
"SLANT",
"SPACE",
"STRETCH",
"SHRINK",
"XHEIGHT",
"QUAD",
"EXTRASPACE",
]
MATHSY_PARAMS = [
"NUM1",
"NUM2",
"NUM3",
"DENOM1",
"DENOM2",
"SUP1",
"SUP2",
"SUP3",
"SUB1",
"SUB2",
"SUPDROP",
"SUBDROP",
"DELIM1",
"DELIM2",
"AXISHEIGHT",
]
MATHEX_PARAMS = [
"DEFAULTRULETHICKNESS",
"BIGOPSPACING1",
"BIGOPSPACING2",
"BIGOPSPACING3",
"BIGOPSPACING4",
"BIGOPSPACING5",
]
VANILLA = 0
MATHSY = 1
MATHEX = 2
UNREACHABLE = 0
PASSTHROUGH = 1
ACCESSABLE = 2
NO_TAG = 0
LIG_TAG = 1
LIST_TAG = 2
EXT_TAG = 3
STOP_FLAG = 128
KERN_FLAG = 128
class TFMException(Exception):
def __init__(self, message):
super().__init__(message)
class TFM:
def __init__(self, file):
self._read(file)
def __repr__(self):
return (
f"<TFM"
f" for {self.family}"
f" in {self.codingscheme}"
f" at {self.designsize:g}pt>"
)
def _read(self, file):
if hasattr(file, "read"):
data = file.read()
else:
with open(file, "rb") as fp:
data = fp.read()
self._data = data
if len(data) < SIZES_SIZE:
raise TFMException("Too short input file")
sizes = SimpleNamespace()
unpack2(SIZES_FORMAT, data, sizes)
# Do some file structure sanity checks.
# TeX and TFtoPL do additional functional checks and might even correct
# “errors” in the input file, but we instead try to output the file as
# it is as long as it is parsable, even if the data make no sense.
if sizes.lf < 0:
raise TFMException("The file claims to have negative or zero length!")
if len(data) < sizes.lf * 4:
raise TFMException("The file has fewer bytes than it claims!")
for name, length in vars(sizes).items():
if length < 0:
raise TFMException("The subfile size: '{name}' is negative!")
if sizes.lh < 2:
raise TFMException(f"The header length is only {sizes.lh}!")
if sizes.bc > sizes.ec + 1 or sizes.ec > 255:
raise TFMException(
f"The character code range {sizes.bc}..{sizes.ec} is illegal!"
)
if sizes.nw == 0 or sizes.nh == 0 or sizes.nd == 0 or sizes.ni == 0:
raise TFMException("Incomplete subfiles for character dimensions!")
if sizes.ne > 256:
raise TFMException(f"There are {ne} extensible recipes!")
if sizes.lf != (
6
+ sizes.lh
+ (sizes.ec - sizes.bc + 1)
+ sizes.nw
+ sizes.nh
+ sizes.nd
+ sizes.ni
+ sizes.nl
+ sizes.nk
+ sizes.ne
+ sizes.np
):
raise TFMException("Subfile sizes don’t add up to the stated total")
# Subfile offsets, used in the helper function below. These all are
# 32-bit word offsets not 8-bit byte offsets.
char_base = 6 + sizes.lh - sizes.bc
width_base = char_base + sizes.ec + 1
height_base = width_base + sizes.nw
depth_base = height_base + sizes.nh
italic_base = depth_base + sizes.nd
lig_kern_base = italic_base + sizes.ni
kern_base = lig_kern_base + sizes.nl
exten_base = kern_base + sizes.nk
param_base = exten_base + sizes.ne
# Helper functions for accessing individual data. If this looks
# nonidiomatic Python, I blame the effect of reading the literate WEB
# documentation of TFtoPL.
def char_info(c):
return 4 * (char_base + c)
def width_index(c):
return data[char_info(c)]
def noneexistent(c):
return c < sizes.bc or c > sizes.ec or width_index(c) == 0
def height_index(c):
return data[char_info(c) + 1] // 16
def depth_index(c):
return data[char_info(c) + 1] % 16
def italic_index(c):
return data[char_info(c) + 2] // 4
def tag(c):
return data[char_info(c) + 2] % 4
def remainder(c):
return data[char_info(c) + 3]
def width(c):
r = 4 * (width_base + width_index(c))
return read_fixed(r, "v")["v"]
def height(c):
r = 4 * (height_base + height_index(c))
return read_fixed(r, "v")["v"]
def depth(c):
r = 4 * (depth_base + depth_index(c))
return read_fixed(r, "v")["v"]
def italic(c):
r = 4 * (italic_base + italic_index(c))
return read_fixed(r, "v")["v"]
def exten(c):
return 4 * (exten_base + remainder(c))
def lig_step(i):
return 4 * (lig_kern_base + i)
def lig_kern_command(i):
command = SimpleNamespace()
unpack2(LIG_KERN_COMMAND, data[i:], command)
return command
def kern(i):
r = 4 * (kern_base + i)
return read_fixed(r, "v")["v"]
def param(i):
return 4 * (param_base + i)
def read_fixed(index, key, obj=None):
ret = unpack2(f">;{key}:{FIXED_FORMAT}", data[index:], obj)
return ret[0]
# Set all attributes to empty values regardless of the header size.
unpack(HEADER_FORMAT4, [0] * HEADER_SIZE4, self)
offset = 24
length = sizes.lh * 4
self.extraheader = {}
if length >= HEADER_SIZE4:
rest = unpack2(HEADER_FORMAT4, data[offset:], self)[1]
if self.face < 18:
s = self.face % 2
b = self.face // 2
self.face = "MBL"[b % 3] + "RI"[s] + "RCE"[b // 3]
for i in range(sizes.lh - HEADER_SIZE4 // 4):
rest = unpack2(f">;HEADER{i + 18}:l", rest, self.extraheader)[1]
elif length >= HEADER_SIZE3:
unpack2(HEADER_FORMAT3, data[offset:], self)
elif length >= HEADER_SIZE2:
unpack2(HEADER_FORMAT2, data[offset:], self)
elif length >= HEADER_SIZE1:
unpack2(HEADER_FORMAT1, data[offset:], self)
self.fonttype = VANILLA
scheme = self.codingscheme.upper()
if scheme.startswith("TEX MATH SY"):
self.fonttype = MATHSY
elif scheme.startswith("TEX MATH EX"):
self.fonttype = MATHEX
self.fontdimens = {}
for i in range(sizes.np):
name = f"PARAMETER{i+1}"
if i <= 6:
name = BASE_PARAMS[i]
elif self.fonttype == MATHSY and i <= 21:
name = MATHSY_PARAMS[i - 7]
elif self.fonttype == MATHEX and i <= 12:
name = MATHEX_PARAMS[i - 7]
read_fixed(param(i), name, self.fontdimens)
lig_kern_map = {}
self.right_boundary_char = None
self.left_boundary_char = None
if sizes.nl > 0:
cmd = lig_kern_command(lig_step(0))
if cmd.skip_byte == 255:
self.right_boundary_char = cmd.next_char
cmd = lig_kern_command(lig_step((sizes.nl - 1)))
if cmd.skip_byte == 255:
self.left_boundary_char = 256
r = 256 * cmd.op_byte + cmd.remainder
lig_kern_map[self.left_boundary_char] = r
self.chars = {}
for c in range(sizes.bc, sizes.ec + 1):
if width_index(c) > 0:
self.chars[c] = info = {}
info["width"] = width(c)
if height_index(c) > 0:
info["height"] = height(c)
if depth_index(c) > 0:
info["depth"] = depth(c)
if italic_index(c) > 0:
info["italic"] = italic(c)
char_tag = tag(c)
if char_tag == NO_TAG:
pass
elif char_tag == LIG_TAG:
lig_kern_map[c] = remainder(c)
elif char_tag == LIST_TAG:
info["nextlarger"] = remainder(c)
elif char_tag == EXT_TAG:
info["varchar"] = varchar = {}
for i in range(4):
part = data[exten(c) + i]
if i == 3 or part > 0:
name = "rep"
if i == 0:
name = "top"
elif i == 1:
name = "mid"
elif i == 2:
name = "bot"
if noneexistent(part):
varchar[name] = c
else:
varchar[name] = part
self.ligatures = {}
self.kerning = {}
for c, i in sorted(lig_kern_map.items()):
cmd = lig_kern_command(lig_step(i))
if cmd.skip_byte > STOP_FLAG:
i = 256 * cmd.op_byte + cmd.remainder
while i < sizes.nl:
cmd = lig_kern_command(lig_step(i))
if cmd.skip_byte > STOP_FLAG:
pass
else:
if cmd.op_byte >= KERN_FLAG:
r = 256 * (cmd.op_byte - KERN_FLAG) + cmd.remainder
self.kerning.setdefault(c, {})[cmd.next_char] = kern(r)
else:
r = cmd.op_byte
if r == 4 or (r > 7 and r != 11):
# Ligature step with nonstandard code, we output
# the code verbatim.
lig = r
else:
lig = ""
if r % 4 > 1:
lig += "/"
lig += "LIG"
if r % 2 != 0:
lig += "/"
while r > 3:
lig += ">"
r -= 4
self.ligatures.setdefault(c, {})[cmd.next_char] = (
lig,
cmd.remainder,
)
if cmd.skip_byte >= STOP_FLAG:
break
i += cmd.skip_byte + 1
if __name__ == "__main__":
import sys
tfm = TFM(sys.argv[1])
print(
"\n".join(
x
for x in [
f"tfm.checksum={tfm.checksum}",
f"tfm.designsize={tfm.designsize}",
f"tfm.codingscheme={tfm.codingscheme}",
f"tfm.fonttype={tfm.fonttype}",
f"tfm.family={tfm.family}",
f"tfm.seven_bit_safe_flag={tfm.seven_bit_safe_flag}",
f"tfm.face={tfm.face}",
f"tfm.extraheader={tfm.extraheader}",
f"tfm.fontdimens={tfm.fontdimens}",
f"tfm.right_boundary_char={tfm.right_boundary_char}",
f"tfm.left_boundary_char={tfm.left_boundary_char}",
f"tfm.kerning={tfm.kerning}",
f"tfm.ligatures={tfm.ligatures}",
f"tfm.chars={tfm.chars}",
]
)
)
print(tfm)
# fontTools/ttx.py
"""\
usage: ttx [options] inputfile1 [... inputfileN]
TTX -- From OpenType To XML And Back
If an input file is a TrueType or OpenType font file, it will be
decompiled to a TTX file (an XML-based text format).
If an input file is a TTX file, it will be compiled to whatever
format the data is in, a TrueType or OpenType/CFF font file.
A special input value of - means read from the standard input.
Output files are created so they are unique: an existing file is
never overwritten.
General options
===============
-h Help: print this message.
--version show version and exit.
-d <outputfolder> Specify a directory where the output files are
to be created.
-o <outputfile> Specify a file to write the output to. A special
value of - would use the standard output.
-f Overwrite existing output file(s), i.e. don't append
numbers.
-v Verbose: more messages will be written to stdout
about what is being done.
-q Quiet: No messages will be written to stdout about
what is being done.
-a allow virtual glyph IDs on compile or decompile.
Dump options
============
-l List table info: instead of dumping to a TTX file, list
some minimal info about each table.
-t <table> Specify a table to dump. Multiple -t options
are allowed. When no -t option is specified, all tables
will be dumped.
-x <table> Specify a table to exclude from the dump. Multiple
-x options are allowed. -t and -x are mutually exclusive.
-s Split tables: save the TTX data into separate TTX files per
table and write one small TTX file that contains references
to the individual table dumps. This file can be used as
input to ttx, as long as the table files are in the
same directory.
-g Split glyf table: Save the glyf data into separate TTX files
per glyph and write a small TTX for the glyf table which
contains references to the individual TTGlyph elements.
NOTE: specifying -g implies -s (no need for -s together
with -g)
-i Do NOT disassemble TT instructions: when this option is
given, all TrueType programs (glyph programs, the font
program and the pre-program) will be written to the TTX
file as hex data instead of assembly. This saves some time
and makes the TTX file smaller.
-z <format> Specify a bitmap data export option for EBDT:
{'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
{'raw', 'extfile'} Each option does one of the following:
-z raw
export the bitmap data as a hex dump
-z row
export each row as hex data
-z bitwise
export each row as binary in an ASCII art style
-z extfile
export the data as external files with XML references
If no export format is specified, 'raw' format is used.
-e Don't ignore decompilation errors, but show a full traceback
and abort.
-y <number> Select font number for TrueType Collection (.ttc/.otc),
starting from 0.
--unicodedata <UnicodeData.txt>
Use custom database file to write character names in the
comments of the cmap TTX output.
--newline <value>
Control how line endings are written in the XML file. It
can be 'LF', 'CR', or 'CRLF'. If not specified, the
default platform-specific line endings are used.
Compile options
===============
-m Merge with TrueType-input-file: specify a TrueType or
OpenType font file to be merged with the TTX file. This
option is only valid when at most one TTX file is specified.
-b Don't recalc glyph bounding boxes: use the values in the
TTX file as-is.
--recalc-timestamp
Set font 'modified' timestamp to current time.
By default, the modification time of the TTX file will be
used.
--no-recalc-timestamp
Keep the original font 'modified' timestamp.
--flavor <type>
Specify flavor of output font file. May be 'woff' or 'woff2'.
Note that WOFF2 requires the Brotli Python extension,
available at https://github.com/google/brotli
--with-zopfli
Use Zopfli instead of Zlib to compress WOFF. The Python
extension is available at https://pypi.python.org/pypi/zopfli
"""
from fontTools.ttLib import TTFont, TTLibError
from fontTools.misc.macCreatorType import getMacCreatorAndType
from fontTools.unicode import setUnicodeData
from fontTools.misc.textTools import Tag, tostr
from fontTools.misc.timeTools import timestampSinceEpoch
from fontTools.misc.loggingTools import Timer
from fontTools.misc.cliTools import makeOutputFileName
import os
import sys
import getopt
import re
import logging
log = logging.getLogger("fontTools.ttx")
opentypeheaderRE = re.compile("""sfntVersion=['"]OTTO["']""")
class Options(object):
listTables = False
outputDir = None
outputFile = None
overWrite = False
verbose = False
quiet = False
splitTables = False
splitGlyphs = False
disassembleInstructions = True
mergeFile = None
recalcBBoxes = True
ignoreDecompileErrors = True
bitmapGlyphDataFormat = "raw"
unicodedata = None
newlinestr = "\n"
recalcTimestamp = None
flavor = None
useZopfli = False
def __init__(self, rawOptions, numFiles):
self.onlyTables = []
self.skipTables = []
self.fontNumber = -1
for option, value in rawOptions:
# general options
if option == "-h":
print(__doc__)
sys.exit(0)
elif option == "--version":
from fontTools import version
print(version)
sys.exit(0)
elif option == "-d":
if not os.path.isdir(value):
raise getopt.GetoptError(
"The -d option value must be an existing directory"
)
self.outputDir = value
elif option == "-o":
self.outputFile = value
elif option == "-f":
self.overWrite = True
elif option == "-v":
self.verbose = True
elif option == "-q":
self.quiet = True
# dump options
elif option == "-l":
self.listTables = True
elif option == "-t":
# pad with space if table tag length is less than 4
value = value.ljust(4)
self.onlyTables.append(value)
elif option == "-x":
# pad with space if table tag length is less than 4
value = value.ljust(4)
self.skipTables.append(value)
elif option == "-s":
self.splitTables = True
elif option == "-g":
# -g implies (and forces) splitTables
self.splitGlyphs = True
self.splitTables = True
elif option == "-i":
self.disassembleInstructions = False
elif option == "-z":
validOptions = ("raw", "row", "bitwise", "extfile")
if value not in validOptions:
raise getopt.GetoptError(
"-z does not allow %s as a format. Use %s"
% (value, validOptions)
)
self.bitmapGlyphDataFormat = value
elif option == "-y":
self.fontNumber = int(value)
# compile options
elif option == "-m":
self.mergeFile = value
elif option == "-b":
self.recalcBBoxes = False
elif option == "-e":
self.ignoreDecompileErrors = False
elif option == "--unicodedata":
self.unicodedata = value
elif option == "--newline":
validOptions = ("LF", "CR", "CRLF")
if value == "LF":
self.newlinestr = "\n"
elif value == "CR":
self.newlinestr = "\r"
elif value == "CRLF":
self.newlinestr = "\r\n"
else:
raise getopt.GetoptError(
"Invalid choice for --newline: %r (choose from %s)"
% (value, ", ".join(map(repr, validOptions)))
)
elif option == "--recalc-timestamp":
self.recalcTimestamp = True
elif option == "--no-recalc-timestamp":
self.recalcTimestamp = False
elif option == "--flavor":
self.flavor = value
elif option == "--with-zopfli":
self.useZopfli = True
if self.verbose and self.quiet:
raise getopt.GetoptError("-q and -v options are mutually exclusive")
if self.verbose:
self.logLevel = logging.DEBUG
elif self.quiet:
self.logLevel = logging.WARNING
else:
self.logLevel = logging.INFO
if self.mergeFile and self.flavor:
raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
if self.onlyTables and self.skipTables:
raise getopt.GetoptError("-t and -x options are mutually exclusive")
if self.mergeFile and numFiles > 1:
raise getopt.GetoptError(
"Must specify exactly one TTX source file when using -m"
)
if self.flavor != "woff" and self.useZopfli:
raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
def ttList(input, output, options):
ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True)
reader = ttf.reader
tags = sorted(reader.keys())
print('Listing table info for "%s":' % input)
format = " %4s %10s %8s %8s"
print(format % ("tag ", " checksum", " length", " offset"))
print(format % ("----", "----------", "--------", "--------"))
for tag in tags:
entry = reader.tables[tag]
if ttf.flavor == "woff2":
# WOFF2 doesn't store table checksums, so they must be calculated
from fontTools.ttLib.sfnt import calcChecksum
data = entry.loadData(reader.transformBuffer)
checkSum = calcChecksum(data)
else:
checkSum = int(entry.checkSum)
if checkSum < 0:
checkSum = checkSum + 0x100000000
checksum = "0x%08X" % checkSum
print(format % (tag, checksum, entry.length, entry.offset))
print()
ttf.close()
@Timer(log, "Done dumping TTX in %(time).3f seconds")
def ttDump(input, output, options):
input_name = input
if input == "-":
input, input_name = sys.stdin.buffer, sys.stdin.name
output_name = output
if output == "-":
output, output_name = sys.stdout, sys.stdout.name
log.info('Dumping "%s" to "%s"...', input_name, output_name)
if options.unicodedata:
setUnicodeData(options.unicodedata)
ttf = TTFont(
input,
0,
ignoreDecompileErrors=options.ignoreDecompileErrors,
fontNumber=options.fontNumber,
)
ttf.saveXML(
output,
tables=options.onlyTables,
skipTables=options.skipTables,
splitTables=options.splitTables,
splitGlyphs=options.splitGlyphs,
disassembleInstructions=options.disassembleInstructions,
bitmapGlyphDataFormat=options.bitmapGlyphDataFormat,
newlinestr=options.newlinestr,
)
ttf.close()
@Timer(log, "Done compiling TTX in %(time).3f seconds")
def ttCompile(input, output, options):
input_name = input
if input == "-":
input, input_name = sys.stdin, sys.stdin.name
output_name = output
if output == "-":
output, output_name = sys.stdout.buffer, sys.stdout.name
log.info('Compiling "%s" to "%s"...', input_name, output_name)
if options.useZopfli:
from fontTools.ttLib import sfnt
sfnt.USE_ZOPFLI = True
ttf = TTFont(
options.mergeFile,
flavor=options.flavor,
recalcBBoxes=options.recalcBBoxes,
recalcTimestamp=options.recalcTimestamp,
)
ttf.importXML(input)
if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin:
# use TTX file modification time for head "modified" timestamp
mtime = os.path.getmtime(input)
ttf["head"].modified = timestampSinceEpoch(mtime)
ttf.save(output)
def guessFileType(fileName):
if fileName == "-":
header = sys.stdin.buffer.peek(256)
ext = ""
else:
base, ext = os.path.splitext(fileName)
try:
with open(fileName, "rb") as f:
header = f.read(256)
except IOError:
return None
if header.startswith(b"\xef\xbb\xbf<?xml"):
header = header.lstrip(b"\xef\xbb\xbf")
cr, tp = getMacCreatorAndType(fileName)
if tp in ("sfnt", "FFIL"):
return "TTF"
if ext == ".dfont":
return "TTF"
head = Tag(header[:4])
if head == "OTTO":
return "OTF"
elif head == "ttcf":
return "TTC"
elif head in ("\0\1\0\0", "true"):
return "TTF"
elif head == "wOFF":
return "WOFF"
elif head == "wOF2":
return "WOFF2"
elif head == "<?xm":
# Use 'latin1' because that can't fail.
header = tostr(header, "latin1")
if opentypeheaderRE.search(header):
return "OTX"
else:
return "TTX"
return None
def parseOptions(args):
rawOptions, files = getopt.getopt(
args,
"ld:o:fvqht:x:sgim:z:baey:",
[
"unicodedata=",
"recalc-timestamp",
"no-recalc-timestamp",
"flavor=",
"version",
"with-zopfli",
"newline=",
],
)
options = Options(rawOptions, len(files))
jobs = []
if not files:
raise getopt.GetoptError("Must specify at least one input file")
for input in files:
if input != "-" and not os.path.isfile(input):
raise getopt.GetoptError('File not found: "%s"' % input)
tp = guessFileType(input)
if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"):
extension = ".ttx"
if options.listTables:
action = ttList
else:
action = ttDump
elif tp == "TTX":
extension = "." + options.flavor if options.flavor else ".ttf"
action = ttCompile
elif tp == "OTX":
extension = "." + options.flavor if options.flavor else ".otf"
action = ttCompile
else:
raise getopt.GetoptError('Unknown file type: "%s"' % input)
if options.outputFile:
output = options.outputFile
else:
if input == "-":
raise getopt.GetoptError("Must provide -o when reading from stdin")
output = makeOutputFileName(
input, options.outputDir, extension, options.overWrite
)
# 'touch' output file to avoid race condition in choosing file names
if action != ttList:
open(output, "a").close()
jobs.append((action, input, output))
return jobs, options
def process(jobs, options):
for action, input, output in jobs:
action(input, output, options)
def main(args=None):
"""Convert OpenType fonts to XML and back"""
from fontTools import configLogger
if args is None:
args = sys.argv[1:]
try:
jobs, options = parseOptions(args)
except getopt.GetoptError as e:
print("%s\nERROR: %s" % (__doc__, e), file=sys.stderr)
sys.exit(2)
configLogger(level=options.logLevel)
try:
process(jobs, options)
except KeyboardInterrupt:
log.error("(Cancelled.)")
sys.exit(1)
except SystemExit:
raise
except TTLibError as e:
log.error(e)
sys.exit(1)
except:
log.exception("Unhandled exception has occurred")
sys.exit(1)
if __name__ == "__main__":
sys.exit(main())
PK fontTools/unicode.py
def _makeunicodes(f):
lines = iter(f.readlines())
unicodes = {}
for line in lines:
if not line:
continue
num, name = line.split(";")[:2]
if name[0] == "<":
continue # "<control>", etc.
num = int(num, 16)
unicodes[num] = name
return unicodes
class _UnicodeCustom(object):
def __init__(self, f):
if isinstance(f, str):
with open(f) as fd:
codes = _makeunicodes(fd)
else:
codes = _makeunicodes(f)
self.codes = codes
def __getitem__(self, charCode):
try:
return self.codes[charCode]
except KeyError:
return "????"
class _UnicodeBuiltin(object):
def __getitem__(self, charCode):
try:
# use the unicodedata2 backport, if available:
# https://github.com/mikekap/unicodedata2
import unicodedata2 as unicodedata
except ImportError:
import unicodedata
try:
return unicodedata.name(chr(charCode))
except ValueError:
return "????"
Unicode = _UnicodeBuiltin()
def setUnicodeData(f):
global Unicode
Unicode = _UnicodeCustom(f)
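# Usage sketch (illustrative, not part of the module): look up Unicode character
# names, or point the module at a custom UnicodeData.txt-style file:
#
#     from fontTools.unicode import Unicode, setUnicodeData
#     Unicode[0x0041]                 # 'LATIN CAPITAL LETTER A'
#     Unicode[0x10FFFF]               # '????' for unnamed/unknown code points
#     setUnicodeData("MyUnicodeData.txt")   # hypothetical path; ';'-separated fields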
PK fontTools/cffLib/__init__.py
"""cffLib: read/write Adobe CFF fonts
OpenType fonts with PostScript outlines contain a completely independent
font file, Adobe's *Compact Font Format*. So dealing with OpenType fonts
requires also dealing with CFF. This module allows you to read and write
fonts written in the CFF format.
In 2016, OpenType 1.8 introduced the `CFF2 <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2>`_
format which, along with other changes, extended the CFF format to deal with
the demands of variable fonts. This module parses both original CFF and CFF2.
"""
from fontTools.misc import sstruct
from fontTools.misc import psCharStrings
from fontTools.misc.arrayTools import unionRect, intRect
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
tobytes,
tostr,
safeEval,
)
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.otBase import OTTableWriter
from fontTools.ttLib.tables.otBase import OTTableReader
from fontTools.ttLib.tables import otTables as ot
from io import BytesIO
import struct
import logging
import re
# mute cffLib debug messages when running ttx in verbose mode
DEBUG = logging.DEBUG - 1
log = logging.getLogger(__name__)
cffHeaderFormat = """
major: B
minor: B
hdrSize: B
"""
maxStackLimit = 513
# maxstack operator has been deprecated. max stack is now always 513.
class StopHintCountEvent(Exception):
pass
class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
stop_hintcount_ops = (
"op_hintmask",
"op_cntrmask",
"op_rmoveto",
"op_hmoveto",
"op_vmoveto",
)
def __init__(self, localSubrs, globalSubrs, private=None):
psCharStrings.SimpleT2Decompiler.__init__(
self, localSubrs, globalSubrs, private
)
def execute(self, charString):
self.need_hintcount = True # until proven otherwise
for op_name in self.stop_hintcount_ops:
setattr(self, op_name, self.stop_hint_count)
if hasattr(charString, "_desubroutinized"):
# If a charstring has already been desubroutinized, we will still
# need to execute it if we need to count hints in order to
# compute the byte length for mask arguments, and haven't finished
# counting hint pairs.
if self.need_hintcount and self.callingStack:
try:
psCharStrings.SimpleT2Decompiler.execute(self, charString)
except StopHintCountEvent:
del self.callingStack[-1]
return
charString._patches = []
psCharStrings.SimpleT2Decompiler.execute(self, charString)
desubroutinized = charString.program[:]
for idx, expansion in reversed(charString._patches):
assert idx >= 2
assert desubroutinized[idx - 1] in [
"callsubr",
"callgsubr",
], desubroutinized[idx - 1]
assert type(desubroutinized[idx - 2]) == int
if expansion[-1] == "return":
expansion = expansion[:-1]
desubroutinized[idx - 2 : idx] = expansion
if not self.private.in_cff2:
if "endchar" in desubroutinized:
# Cut off after first endchar
desubroutinized = desubroutinized[
: desubroutinized.index("endchar") + 1
]
else:
if not len(desubroutinized) or desubroutinized[-1] != "return":
desubroutinized.append("return")
charString._desubroutinized = desubroutinized
del charString._patches
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1] + self.localBias]
psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
self.processSubr(index, subr)
def stop_hint_count(self, *args):
self.need_hintcount = False
for op_name in self.stop_hintcount_ops:
setattr(self, op_name, None)
cs = self.callingStack[-1]
if hasattr(cs, "_desubroutinized"):
raise StopHintCountEvent()
def op_hintmask(self, index):
psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
if self.need_hintcount:
self.stop_hint_count()
def processSubr(self, index, subr):
cs = self.callingStack[-1]
if not hasattr(cs, "_desubroutinized"):
cs._patches.append((index, subr._desubroutinized))
class CFFFontSet(object):
"""A CFF font "file" can contain more than one font, although this is
extremely rare (and not allowed within OpenType fonts).
This class is the entry point for parsing a CFF table. To actually
manipulate the data inside the CFF font, you will want to access the
``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet``
object can either be treated as a dictionary (with appropriate
``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict`
objects, or as a list.
.. code:: python
from fontTools import ttLib
tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf")
tt["CFF "].cff
# <fontTools.cffLib.CFFFontSet object at 0x101e24c90>
tt["CFF "].cff[0] # Here's your actual font data
# <fontTools.cffLib.TopDict object at 0x1020f1fd0>
"""
def decompile(self, file, otFont, isCFF2=None):
"""Parse a binary CFF file into an internal representation. ``file``
should be a file handle object. ``otFont`` is the top-level
:py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
library makes an assertion that the CFF header is of the appropriate
version.
"""
self.otFont = otFont
sstruct.unpack(cffHeaderFormat, file.read(3), self)
if isCFF2 is not None:
# called from ttLib: assert 'major' as read from file matches the
# expected version
expected_major = 2 if isCFF2 else 1
if self.major != expected_major:
raise ValueError(
"Invalid CFF 'major' version: expected %d, found %d"
% (expected_major, self.major)
)
else:
# use 'major' version from file to determine if isCFF2
assert self.major in (1, 2), "Unknown CFF format"
isCFF2 = self.major == 2
if not isCFF2:
self.offSize = struct.unpack("B", file.read(1))[0]
file.seek(self.hdrSize)
self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2))
self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2)
self.strings = IndexedStrings(file)
else: # isCFF2
self.topDictSize = struct.unpack(">H", file.read(2))[0]
file.seek(self.hdrSize)
self.fontNames = ["CFF2Font"]
cff2GetGlyphOrder = otFont.getGlyphOrder
# in CFF2, offsetSize is the size of the TopDict data.
self.topDictIndex = TopDictIndex(
file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2
)
self.strings = None
self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2)
self.topDictIndex.strings = self.strings
self.topDictIndex.GlobalSubrs = self.GlobalSubrs
def __len__(self):
return len(self.fontNames)
def keys(self):
return list(self.fontNames)
def values(self):
return self.topDictIndex
def __getitem__(self, nameOrIndex):
"""Return TopDict instance identified by name (str) or index (int
or any object that implements `__index__`).
"""
if hasattr(nameOrIndex, "__index__"):
index = nameOrIndex.__index__()
elif isinstance(nameOrIndex, str):
name = nameOrIndex
try:
index = self.fontNames.index(name)
except ValueError:
raise KeyError(nameOrIndex)
else:
raise TypeError(nameOrIndex)
return self.topDictIndex[index]
def compile(self, file, otFont, isCFF2=None):
"""Write the object back into binary representation onto the given file.
``file`` should be a file handle object. ``otFont`` is the top-level
:py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
library makes an assertion that the CFF header is of the appropriate
version.
"""
self.otFont = otFont
if isCFF2 is not None:
# called from ttLib: assert 'major' value matches expected version
expected_major = 2 if isCFF2 else 1
if self.major != expected_major:
raise ValueError(
"Invalid CFF 'major' version: expected %d, found %d"
% (expected_major, self.major)
)
else:
# use current 'major' value to determine output format
assert self.major in (1, 2), "Unknown CFF format"
isCFF2 = self.major == 2
if otFont.recalcBBoxes and not isCFF2:
for topDict in self.topDictIndex:
topDict.recalcFontBBox()
if not isCFF2:
strings = IndexedStrings()
else:
strings = None
writer = CFFWriter(isCFF2)
topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2)
if isCFF2:
self.hdrSize = 5
writer.add(sstruct.pack(cffHeaderFormat, self))
# Note: topDictSize will most likely change in CFFWriter.toFile().
self.topDictSize = topCompiler.getDataLength()
writer.add(struct.pack(">H", self.topDictSize))
else:
self.hdrSize = 4
self.offSize = 4 # will most likely change in CFFWriter.toFile().
writer.add(sstruct.pack(cffHeaderFormat, self))
writer.add(struct.pack("B", self.offSize))
if not isCFF2:
fontNames = Index()
for name in self.fontNames:
fontNames.append(name)
writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2))
writer.add(topCompiler)
if not isCFF2:
writer.add(strings.getCompiler())
writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2))
for topDict in self.topDictIndex:
if not hasattr(topDict, "charset") or topDict.charset is None:
charset = otFont.getGlyphOrder()
topDict.charset = charset
children = topCompiler.getChildren(strings)
for child in children:
writer.add(child)
writer.toFile(file)
def toXML(self, xmlWriter):
"""Write the object into XML representation onto the given
:class:`fontTools.misc.xmlWriter.XMLWriter`.
.. code:: python
writer = xmlWriter.XMLWriter(sys.stdout)
tt["CFF "].cff.toXML(writer)
"""
xmlWriter.simpletag("major", value=self.major)
xmlWriter.newline()
xmlWriter.simpletag("minor", value=self.minor)
xmlWriter.newline()
for fontName in self.fontNames:
xmlWriter.begintag("CFFFont", name=tostr(fontName))
xmlWriter.newline()
font = self[fontName]
font.toXML(xmlWriter)
xmlWriter.endtag("CFFFont")
xmlWriter.newline()
xmlWriter.newline()
xmlWriter.begintag("GlobalSubrs")
xmlWriter.newline()
self.GlobalSubrs.toXML(xmlWriter)
xmlWriter.endtag("GlobalSubrs")
xmlWriter.newline()
def fromXML(self, name, attrs, content, otFont=None):
"""Reads data from the XML element into the ``CFFFontSet`` object."""
self.otFont = otFont
# set defaults. These will be replaced if there are entries for them
# in the XML file.
if not hasattr(self, "major"):
self.major = 1
if not hasattr(self, "minor"):
self.minor = 0
if name == "CFFFont":
if self.major == 1:
if not hasattr(self, "offSize"):
# this will be recalculated when the cff is compiled.
self.offSize = 4
if not hasattr(self, "hdrSize"):
self.hdrSize = 4
if not hasattr(self, "GlobalSubrs"):
self.GlobalSubrs = GlobalSubrsIndex()
if not hasattr(self, "fontNames"):
self.fontNames = []
self.topDictIndex = TopDictIndex()
fontName = attrs["name"]
self.fontNames.append(fontName)
topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
topDict.charset = None # gets filled in later
elif self.major == 2:
if not hasattr(self, "hdrSize"):
self.hdrSize = 5
if not hasattr(self, "GlobalSubrs"):
self.GlobalSubrs = GlobalSubrsIndex()
if not hasattr(self, "fontNames"):
self.fontNames = ["CFF2Font"]
cff2GetGlyphOrder = self.otFont.getGlyphOrder
topDict = TopDict(
GlobalSubrs=self.GlobalSubrs, cff2GetGlyphOrder=cff2GetGlyphOrder
)
self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder)
self.topDictIndex.append(topDict)
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
topDict.fromXML(name, attrs, content)
if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None:
fdArray = topDict.FDArray
for fontDict in fdArray:
if hasattr(fontDict, "Private"):
fontDict.Private.vstore = topDict.VarStore
elif name == "GlobalSubrs":
subrCharStringClass = psCharStrings.T2CharString
if not hasattr(self, "GlobalSubrs"):
self.GlobalSubrs = GlobalSubrsIndex()
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
subr = subrCharStringClass()
subr.fromXML(name, attrs, content)
self.GlobalSubrs.append(subr)
elif name == "major":
self.major = int(attrs["value"])
elif name == "minor":
self.minor = int(attrs["value"])
def convertCFFToCFF2(self, otFont):
"""Converts this object from CFF format to CFF2 format. This conversion
is done 'in-place'. The conversion cannot be reversed.
This assumes a decompiled CFF table. (i.e. that the object has been
filled via :meth:`decompile`.)"""
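# Usage sketch (illustrative; the path is hypothetical and the table must
# already be decompiled, as noted above):
#
#     font = TTFont("MyFont.otf")
#     font["CFF "].cff.convertCFFToCFF2(font)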
self.major = 2
cff2GetGlyphOrder = self.otFont.getGlyphOrder
topDictData = TopDictIndex(None, cff2GetGlyphOrder)
topDictData.items = self.topDictIndex.items
self.topDictIndex = topDictData
topDict = topDictData[0]
if hasattr(topDict, "Private"):
privateDict = topDict.Private
else:
privateDict = None
opOrder = buildOrder(topDictOperators2)
topDict.order = opOrder
topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
for entry in topDictOperators:
key = entry[1]
if key not in opOrder:
if key in topDict.rawDict:
del topDict.rawDict[key]
if hasattr(topDict, key):
delattr(topDict, key)
if not hasattr(topDict, "FDArray"):
fdArray = topDict.FDArray = FDArrayIndex()
fdArray.strings = None
fdArray.GlobalSubrs = topDict.GlobalSubrs
topDict.GlobalSubrs.fdArray = fdArray
charStrings = topDict.CharStrings
if charStrings.charStringsAreIndexed:
charStrings.charStringsIndex.fdArray = fdArray
else:
charStrings.fdArray = fdArray
fontDict = FontDict()
fontDict.setCFF2(True)
fdArray.append(fontDict)
fontDict.Private = privateDict
privateOpOrder = buildOrder(privateDictOperators2)
for entry in privateDictOperators:
key = entry[1]
if key not in privateOpOrder:
if key in privateDict.rawDict:
# print "Removing private dict", key
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# print "Removing privateDict attr", key
else:
# clean up the PrivateDicts in the fdArray
fdArray = topDict.FDArray
privateOpOrder = buildOrder(privateDictOperators2)
for fontDict in fdArray:
fontDict.setCFF2(True)
for key in fontDict.rawDict.keys():
if key not in fontDict.order:
del fontDict.rawDict[key]
if hasattr(fontDict, key):
delattr(fontDict, key)
privateDict = fontDict.Private
for entry in privateDictOperators:
key = entry[1]
if key not in privateOpOrder:
if key in privateDict.rawDict:
# print "Removing private dict", key
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# print "Removing privateDict attr", key
# At this point, the Subrs and CharStrings are all still of the T2CharString class;
# the easiest way to fix this is to compile and then decompile again.
file = BytesIO()
self.compile(file, otFont, isCFF2=True)
file.seek(0)
self.decompile(file, otFont, isCFF2=True)
def desubroutinize(self):
for fontName in self.fontNames:
font = self[fontName]
cs = font.CharStrings
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
c.decompile()
subrs = getattr(c.private, "Subrs", [])
decompiler = _DesubroutinizingT2Decompiler(
subrs, c.globalSubrs, c.private
)
decompiler.execute(c)
c.program = c._desubroutinized
del c._desubroutinized
# Delete all the local subrs
if hasattr(font, "FDArray"):
for fd in font.FDArray:
pd = fd.Private
if hasattr(pd, "Subrs"):
del pd.Subrs
if "Subrs" in pd.rawDict:
del pd.rawDict["Subrs"]
else:
pd = font.Private
if hasattr(pd, "Subrs"):
del pd.Subrs
if "Subrs" in pd.rawDict:
del pd.rawDict["Subrs"]
# as well as the global subrs
self.GlobalSubrs.clear()
class CFFWriter(object):
"""Helper class for serializing CFF data to binary. Used by
:meth:`CFFFontSet.compile`."""
def __init__(self, isCFF2):
self.data = []
self.isCFF2 = isCFF2
def add(self, table):
self.data.append(table)
def toFile(self, file):
lastPosList = None
count = 1
while True:
log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count)
count = count + 1
pos = 0
posList = [pos]
for item in self.data:
if hasattr(item, "getDataLength"):
endPos = pos + item.getDataLength()
if isinstance(item, TopDictIndexCompiler) and item.isCFF2:
self.topDictSize = item.getDataLength()
else:
endPos = pos + len(item)
if hasattr(item, "setPos"):
item.setPos(pos, endPos)
pos = endPos
posList.append(pos)
if posList == lastPosList:
break
lastPosList = posList
log.log(DEBUG, "CFFWriter.toFile() writing to file.")
begin = file.tell()
if self.isCFF2:
self.data[1] = struct.pack(">H", self.topDictSize)
else:
self.offSize = calcOffSize(lastPosList[-1])
self.data[1] = struct.pack("B", self.offSize)
posList = [0]
for item in self.data:
if hasattr(item, "toFile"):
item.toFile(file)
else:
file.write(item)
posList.append(file.tell() - begin)
assert posList == lastPosList
def calcOffSize(largestOffset):
if largestOffset < 0x100:
offSize = 1
elif largestOffset < 0x10000:
offSize = 2
elif largestOffset < 0x1000000:
offSize = 3
else:
offSize = 4
return offSize
class IndexCompiler(object):
"""Base class for writing CFF `INDEX data <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#5-index-data>`_
to binary."""
def __init__(self, items, strings, parent, isCFF2=None):
if isCFF2 is None and hasattr(parent, "isCFF2"):
isCFF2 = parent.isCFF2
assert isCFF2 is not None
self.isCFF2 = isCFF2
self.items = self.getItems(items, strings)
self.parent = parent
def getItems(self, items, strings):
return items
def getOffsets(self):
# An empty INDEX contains only the count field.
if self.items:
pos = 1
offsets = [pos]
for item in self.items:
if hasattr(item, "getDataLength"):
pos = pos + item.getDataLength()
else:
pos = pos + len(item)
offsets.append(pos)
else:
offsets = []
return offsets
def getDataLength(self):
if self.isCFF2:
countSize = 4
else:
countSize = 2
if self.items:
lastOffset = self.getOffsets()[-1]
offSize = calcOffSize(lastOffset)
dataLength = (
countSize
+ 1 # count
+ (len(self.items) + 1) * offSize # offSize
+ lastOffset # the offsets
- 1 # size of object data
)
else:
# count. For empty INDEX tables, this is the only entry.
dataLength = countSize
return dataLength
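# Worked example of the arithmetic above (CFF, i.e. countSize == 2): a single
# 10-byte item gives offsets [1, 11], offSize 1, and a data length of
# 2 (count) + 1 (offSize byte) + 2*1 (two offsets) + 10 (object data) = 15,
# which matches countSize + 1 + (len(items)+1)*offSize + lastOffset - 1.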
def toFile(self, file):
offsets = self.getOffsets()
if self.isCFF2:
writeCard32(file, len(self.items))
else:
writeCard16(file, len(self.items))
# An empty INDEX contains only the count field.
if self.items:
offSize = calcOffSize(offsets[-1])
writeCard8(file, offSize)
offSize = -offSize
pack = struct.pack
for offset in offsets:
binOffset = pack(">l", offset)[offSize:]
assert len(binOffset) == -offSize
file.write(binOffset)
for item in self.items:
if hasattr(item, "toFile"):
item.toFile(file)
else:
data = tobytes(item, encoding="latin1")
file.write(data)
class IndexedStringsCompiler(IndexCompiler):
def getItems(self, items, strings):
return items.strings
class TopDictIndexCompiler(IndexCompiler):
"""Helper class for writing the TopDict to binary."""
def getItems(self, items, strings):
out = []
for item in items:
out.append(item.getCompiler(strings, self))
return out
def getChildren(self, strings):
children = []
for topDict in self.items:
children.extend(topDict.getChildren(strings))
return children
def getOffsets(self):
if self.isCFF2:
offsets = [0, self.items[0].getDataLength()]
return offsets
else:
return super(TopDictIndexCompiler, self).getOffsets()
def getDataLength(self):
if self.isCFF2:
dataLength = self.items[0].getDataLength()
return dataLength
else:
return super(TopDictIndexCompiler, self).getDataLength()
def toFile(self, file):
if self.isCFF2:
self.items[0].toFile(file)
else:
super(TopDictIndexCompiler, self).toFile(file)
class FDArrayIndexCompiler(IndexCompiler):
"""Helper class for writing the
`Font DICT INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#10-font-dict-index-font-dicts-and-fdselect>`_
to binary."""
def getItems(self, items, strings):
out = []
for item in items:
out.append(item.getCompiler(strings, self))
return out
def getChildren(self, strings):
children = []
for fontDict in self.items:
children.extend(fontDict.getChildren(strings))
return children
def toFile(self, file):
offsets = self.getOffsets()
if self.isCFF2:
writeCard32(file, len(self.items))
else:
writeCard16(file, len(self.items))
offSize = calcOffSize(offsets[-1])
writeCard8(file, offSize)
offSize = -offSize
pack = struct.pack
for offset in offsets:
binOffset = pack(">l", offset)[offSize:]
assert len(binOffset) == -offSize
file.write(binOffset)
for item in self.items:
if hasattr(item, "toFile"):
item.toFile(file)
else:
file.write(item)
def setPos(self, pos, endPos):
self.parent.rawDict["FDArray"] = pos
class GlobalSubrsCompiler(IndexCompiler):
"""Helper class for writing the `global subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
to binary."""
def getItems(self, items, strings):
out = []
for cs in items:
cs.compile(self.isCFF2)
out.append(cs.bytecode)
return out
class SubrsCompiler(GlobalSubrsCompiler):
"""Helper class for writing the `local subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
to binary."""
def setPos(self, pos, endPos):
offset = pos - self.parent.pos
self.parent.rawDict["Subrs"] = offset
class CharStringsCompiler(GlobalSubrsCompiler):
"""Helper class for writing the `CharStrings INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
to binary."""
def getItems(self, items, strings):
out = []
for cs in items:
cs.compile(self.isCFF2)
out.append(cs.bytecode)
return out
def setPos(self, pos, endPos):
self.parent.rawDict["CharStrings"] = pos
class Index(object):
"""This class represents what the CFF spec calls an INDEX (an array of
variable-sized objects). `Index` items can be addressed and set using
Python list indexing."""
compilerClass = IndexCompiler
def __init__(self, file=None, isCFF2=None):
assert (isCFF2 is None) == (file is None)
self.items = []
name = self.__class__.__name__
if file is None:
return
self._isCFF2 = isCFF2
log.log(DEBUG, "loading %s at %s", name, file.tell())
self.file = file
if isCFF2:
count = readCard32(file)
else:
count = readCard16(file)
if count == 0:
return
self.items = [None] * count
offSize = readCard8(file)
log.log(DEBUG, " index count: %s offSize: %s", count, offSize)
assert offSize <= 4, "offSize too large: %s" % offSize
self.offsets = offsets = []
pad = b"\0" * (4 - offSize)
for index in range(count + 1):
chunk = file.read(offSize)
chunk = pad + chunk
(offset,) = struct.unpack(">L", chunk)
offsets.append(int(offset))
self.offsetBase = file.tell() - 1
file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot
log.log(DEBUG, " end of %s at %s", name, file.tell())
def __len__(self):
return len(self.items)
def __getitem__(self, index):
item = self.items[index]
if item is not None:
return item
offset = self.offsets[index] + self.offsetBase
size = self.offsets[index + 1] - self.offsets[index]
file = self.file
file.seek(offset)
data = file.read(size)
assert len(data) == size
item = self.produceItem(index, data, file, offset)
self.items[index] = item
return item
def __setitem__(self, index, item):
self.items[index] = item
def produceItem(self, index, data, file, offset):
return data
def append(self, item):
"""Add an item to an INDEX."""
self.items.append(item)
def getCompiler(self, strings, parent, isCFF2=None):
return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
def clear(self):
"""Empty the INDEX."""
del self.items[:]
class GlobalSubrsIndex(Index):
"""This index contains all the global subroutines in the font. A global
subroutine is a set of ``CharString`` data which is accessible to any
glyph in the font, and is used to store repeated instructions - for
example, components may be encoded as global subroutines, but so could
hinting instructions.
Remember that when interpreting a ``callgsubr`` instruction (or indeed
a ``callsubr`` instruction), you will need to add the "subroutine
number bias" to the number given:
.. code:: python
tt = ttLib.TTFont("Almendra-Bold.otf")
u = tt["CFF "].cff[0].CharStrings["udieresis"]
u.decompile()
u.toXML(XMLWriter(sys.stdout))
# <some stuff>
# -64 callgsubr <-- Subroutine which implements the dieresis mark
# <other stuff>
tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG
# <T2CharString (bytecode) at 103451d10>
tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT
# <T2CharString (source) at 103451390>
("The bias applied depends on the number of subrs (gsubrs). If the number of
subrs (gsubrs) is less than 1240, the bias is 107. Otherwise if it is less
than 33900, it is 1131; otherwise it is 32768.",
`Subroutine Operators <https://docs.microsoft.com/en-us/typography/opentype/otspec180/cff2charstr#section4.4>`)
"""
compilerClass = GlobalSubrsCompiler
subrClass = psCharStrings.T2CharString
charStringClass = psCharStrings.T2CharString
def __init__(
self,
file=None,
globalSubrs=None,
private=None,
fdSelect=None,
fdArray=None,
isCFF2=None,
):
super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2)
self.globalSubrs = globalSubrs
self.private = private
if fdSelect:
self.fdSelect = fdSelect
if fdArray:
self.fdArray = fdArray
def produceItem(self, index, data, file, offset):
if self.private is not None:
private = self.private
elif hasattr(self, "fdArray") and self.fdArray is not None:
if hasattr(self, "fdSelect") and self.fdSelect is not None:
fdIndex = self.fdSelect[index]
else:
fdIndex = 0
private = self.fdArray[fdIndex].Private
else:
private = None
return self.subrClass(data, private=private, globalSubrs=self.globalSubrs)
def toXML(self, xmlWriter):
"""Write the subroutines index into XML representation onto the given
:class:`fontTools.misc.xmlWriter.XMLWriter`.
.. code:: python
writer = xmlWriter.XMLWriter(sys.stdout)
tt["CFF "].cff[0].GlobalSubrs.toXML(writer)
"""
xmlWriter.comment(
"The 'index' attribute is only for humans; " "it is ignored when parsed."
)
xmlWriter.newline()
for i in range(len(self)):
subr = self[i]
if subr.needsDecompilation():
xmlWriter.begintag("CharString", index=i, raw=1)
else:
xmlWriter.begintag("CharString", index=i)
xmlWriter.newline()
subr.toXML(xmlWriter)
xmlWriter.endtag("CharString")
xmlWriter.newline()
def fromXML(self, name, attrs, content):
if name != "CharString":
return
subr = self.subrClass()
subr.fromXML(name, attrs, content)
self.append(subr)
def getItemAndSelector(self, index):
sel = None
if hasattr(self, "fdSelect"):
sel = self.fdSelect[index]
return self[index], sel
class SubrsIndex(GlobalSubrsIndex):
"""This index contains a glyph's local subroutines. A local subroutine is a
private set of ``CharString`` data which is accessible only to the glyph to
which the index is attached."""
compilerClass = SubrsCompiler
class TopDictIndex(Index):
"""This index represents the array of ``TopDict`` structures in the font
(again, usually only one entry is present). Hence the following calls are
equivalent:
.. code:: python
tt["CFF "].cff[0]
# <fontTools.cffLib.TopDict object at 0x102ed6e50>
tt["CFF "].cff.topDictIndex[0]
# <fontTools.cffLib.TopDict object at 0x102ed6e50>
"""
compilerClass = TopDictIndexCompiler
def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, isCFF2=None):
assert (isCFF2 is None) == (file is None)
self.cff2GetGlyphOrder = cff2GetGlyphOrder
if file is not None and isCFF2:
self._isCFF2 = isCFF2
self.items = []
name = self.__class__.__name__
log.log(DEBUG, "loading %s at %s", name, file.tell())
self.file = file
count = 1
self.items = [None] * count
self.offsets = [0, topSize]
self.offsetBase = file.tell()
# pretend we've read the whole lot
file.seek(self.offsetBase + topSize)
log.log(DEBUG, " end of %s at %s", name, file.tell())
else:
super(TopDictIndex, self).__init__(file, isCFF2=isCFF2)
def produceItem(self, index, data, file, offset):
top = TopDict(
self.strings,
file,
offset,
self.GlobalSubrs,
self.cff2GetGlyphOrder,
isCFF2=self._isCFF2,
)
top.decompile(data)
return top
def toXML(self, xmlWriter):
for i in range(len(self)):
xmlWriter.begintag("FontDict", index=i)
xmlWriter.newline()
self[i].toXML(xmlWriter)
xmlWriter.endtag("FontDict")
xmlWriter.newline()
class FDArrayIndex(Index):
compilerClass = FDArrayIndexCompiler
def toXML(self, xmlWriter):
for i in range(len(self)):
xmlWriter.begintag("FontDict", index=i)
xmlWriter.newline()
self[i].toXML(xmlWriter)
xmlWriter.endtag("FontDict")
xmlWriter.newline()
def produceItem(self, index, data, file, offset):
fontDict = FontDict(
self.strings,
file,
offset,
self.GlobalSubrs,
isCFF2=self._isCFF2,
vstore=self.vstore,
)
fontDict.decompile(data)
return fontDict
def fromXML(self, name, attrs, content):
if name != "FontDict":
return
fontDict = FontDict()
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
fontDict.fromXML(name, attrs, content)
self.append(fontDict)
class VarStoreData(object):
def __init__(self, file=None, otVarStore=None):
self.file = file
self.data = None
self.otVarStore = otVarStore
self.font = TTFont() # dummy font for the decompile function.
def decompile(self):
if self.file:
# read data in from file. Assume position is correct.
length = readCard16(self.file)
self.data = self.file.read(length)
globalState = {}
reader = OTTableReader(self.data, globalState)
self.otVarStore = ot.VarStore()
self.otVarStore.decompile(reader, self.font)
return self
def compile(self):
writer = OTTableWriter()
self.otVarStore.compile(writer, self.font)
# Note that this omits the initial Card16 length from the CFF2
# VarStore data block
self.data = writer.getAllData()
def writeXML(self, xmlWriter, name):
self.otVarStore.toXML(xmlWriter, self.font)
def xmlRead(self, name, attrs, content, parent):
self.otVarStore = ot.VarStore()
for element in content:
if isinstance(element, tuple):
name, attrs, content = element
self.otVarStore.fromXML(name, attrs, content, self.font)
else:
pass
return None
def __len__(self):
return len(self.data)
def getNumRegions(self, vsIndex):
if vsIndex is None:
vsIndex = 0
varData = self.otVarStore.VarData[vsIndex]
numRegions = varData.VarRegionCount
return numRegions
class FDSelect(object):
def __init__(self, file=None, numGlyphs=None, format=None):
if file:
# read data in from file
self.format = readCard8(file)
if self.format == 0:
from array import array
self.gidArray = array("B", file.read(numGlyphs)).tolist()
elif self.format == 3:
gidArray = [None] * numGlyphs
nRanges = readCard16(file)
fd = None
prev = None
for i in range(nRanges):
first = readCard16(file)
if prev is not None:
for glyphID in range(prev, first):
gidArray[glyphID] = fd
prev = first
fd = readCard8(file)
if prev is not None:
first = readCard16(file)
for glyphID in range(prev, first):
gidArray[glyphID] = fd
self.gidArray = gidArray
elif self.format == 4:
gidArray = [None] * numGlyphs
nRanges = readCard32(file)
fd = None
prev = None
for i in range(nRanges):
first = readCard32(file)
if prev is not None:
for glyphID in range(prev, first):
gidArray[glyphID] = fd
prev = first
fd = readCard16(file)
if prev is not None:
first = readCard32(file)
for glyphID in range(prev, first):
gidArray[glyphID] = fd
self.gidArray = gidArray
else:
assert False, "unsupported FDSelect format: %s" % format
else:
# reading from XML. Make empty gidArray, and leave format as passed in.
# format is None will result in the smallest representation being used.
self.format = format
self.gidArray = []
def __len__(self):
return len(self.gidArray)
def __getitem__(self, index):
return self.gidArray[index]
def __setitem__(self, index, fdSelectValue):
self.gidArray[index] = fdSelectValue
def append(self, fdSelectValue):
self.gidArray.append(fdSelectValue)
class CharStrings(object):
"""The ``CharStrings`` in the font represent the instructions for drawing
each glyph. This object presents a dictionary interface to the font's
CharStrings, indexed by glyph name:
.. code:: python
tt["CFF "].cff[0].CharStrings["a"]
# <T2CharString (bytecode) at 103451e90>
See :class:`fontTools.misc.psCharStrings.T1CharString` and
:class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile,
compile and interpret the glyph drawing instructions in the returned objects.
"""
def __init__(
self,
file,
charset,
globalSubrs,
private,
fdSelect,
fdArray,
isCFF2=None,
varStore=None,
):
self.globalSubrs = globalSubrs
self.varStore = varStore
if file is not None:
self.charStringsIndex = SubrsIndex(
file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2
)
self.charStrings = charStrings = {}
for i in range(len(charset)):
charStrings[charset[i]] = i
# read from OTF file: charStrings.values() are indices into
# charStringsIndex.
self.charStringsAreIndexed = 1
else:
self.charStrings = {}
# read from ttx file: charStrings.values() are actual charstrings
self.charStringsAreIndexed = 0
self.private = private
if fdSelect is not None:
self.fdSelect = fdSelect
if fdArray is not None:
self.fdArray = fdArray
def keys(self):
return list(self.charStrings.keys())
def values(self):
if self.charStringsAreIndexed:
return self.charStringsIndex
else:
return list(self.charStrings.values())
def has_key(self, name):
return name in self.charStrings
__contains__ = has_key
def __len__(self):
return len(self.charStrings)
def __getitem__(self, name):
charString = self.charStrings[name]
if self.charStringsAreIndexed:
charString = self.charStringsIndex[charString]
return charString
def __setitem__(self, name, charString):
if self.charStringsAreIndexed:
index = self.charStrings[name]
self.charStringsIndex[index] = charString
else:
self.charStrings[name] = charString
def getItemAndSelector(self, name):
if self.charStringsAreIndexed:
index = self.charStrings[name]
return self.charStringsIndex.getItemAndSelector(index)
else:
if hasattr(self, "fdArray"):
if hasattr(self, "fdSelect"):
sel = self.charStrings[name].fdSelectIndex
else:
sel = 0
else:
sel = None
return self.charStrings[name], sel
def toXML(self, xmlWriter):
names = sorted(self.keys())
for name in names:
charStr, fdSelectIndex = self.getItemAndSelector(name)
if charStr.needsDecompilation():
raw = [("raw", 1)]
else:
raw = []
if fdSelectIndex is None:
xmlWriter.begintag("CharString", [("name", name)] + raw)
else:
xmlWriter.begintag(
"CharString",
[("name", name), ("fdSelectIndex", fdSelectIndex)] + raw,
)
xmlWriter.newline()
charStr.toXML(xmlWriter)
xmlWriter.endtag("CharString")
xmlWriter.newline()
def fromXML(self, name, attrs, content):
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
if name != "CharString":
continue
fdID = -1
if hasattr(self, "fdArray"):
try:
fdID = safeEval(attrs["fdSelectIndex"])
except KeyError:
fdID = 0
private = self.fdArray[fdID].Private
else:
private = self.private
glyphName = attrs["name"]
charStringClass = psCharStrings.T2CharString
charString = charStringClass(private=private, globalSubrs=self.globalSubrs)
charString.fromXML(name, attrs, content)
if fdID >= 0:
charString.fdSelectIndex = fdID
self[glyphName] = charString
def readCard8(file):
return byteord(file.read(1))
def readCard16(file):
(value,) = struct.unpack(">H", file.read(2))
return value
def readCard32(file):
(value,) = struct.unpack(">L", file.read(4))
return value
def writeCard8(file, value):
file.write(bytechr(value))
def writeCard16(file, value):
file.write(struct.pack(">H", value))
def writeCard32(file, value):
file.write(struct.pack(">L", value))
def packCard8(value):
return bytechr(value)
def packCard16(value):
return struct.pack(">H", value)
def packCard32(value):
return struct.pack(">L", value)
def buildOperatorDict(table):
d = {}
for op, name, arg, default, conv in table:
d[op] = (name, arg)
return d
def buildOpcodeDict(table):
d = {}
for op, name, arg, default, conv in table:
if isinstance(op, tuple):
op = bytechr(op[0]) + bytechr(op[1])
else:
op = bytechr(op)
d[name] = (op, arg)
return d
def buildOrder(table):
l = []
for op, name, arg, default, conv in table:
l.append(name)
return l
def buildDefaults(table):
d = {}
for op, name, arg, default, conv in table:
if default is not None:
d[name] = default
return d
def buildConverters(table):
d = {}
for op, name, arg, default, conv in table:
d[name] = conv
return d
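# For example, with the topDictOperators entry
# ``(15, "charset", "number", None, CharsetConverter())`` defined further below,
# buildOperatorDict() maps 15 -> ("charset", "number"), buildOpcodeDict() maps
# "charset" -> (b"\x0f", "number"), and buildDefaults() skips the entry because
# its default is None.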
class SimpleConverter(object):
def read(self, parent, value):
if not hasattr(parent, "file"):
return self._read(parent, value)
file = parent.file
pos = file.tell()
try:
return self._read(parent, value)
finally:
file.seek(pos)
def _read(self, parent, value):
return value
def write(self, parent, value):
return value
def xmlWrite(self, xmlWriter, name, value):
xmlWriter.simpletag(name, value=value)
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
return attrs["value"]
class ASCIIConverter(SimpleConverter):
def _read(self, parent, value):
return tostr(value, encoding="ascii")
def write(self, parent, value):
return tobytes(value, encoding="ascii")
def xmlWrite(self, xmlWriter, name, value):
xmlWriter.simpletag(name, value=tostr(value, encoding="ascii"))
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
return tobytes(attrs["value"], encoding=("ascii"))
class Latin1Converter(SimpleConverter):
def _read(self, parent, value):
return tostr(value, encoding="latin1")
def write(self, parent, value):
return tobytes(value, encoding="latin1")
def xmlWrite(self, xmlWriter, name, value):
value = tostr(value, encoding="latin1")
if name in ["Notice", "Copyright"]:
value = re.sub(r"[\r\n]\s+", " ", value)
xmlWriter.simpletag(name, value=value)
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
return tobytes(attrs["value"], encoding=("latin1"))
def parseNum(s):
try:
value = int(s)
except:
value = float(s)
return value
def parseBlendList(s):
valueList = []
for element in s:
if isinstance(element, str):
continue
name, attrs, content = element
blendList = attrs["value"].split()
blendList = [eval(val) for val in blendList]
valueList.append(blendList)
if len(valueList) == 1:
valueList = valueList[0]
return valueList
class NumberConverter(SimpleConverter):
def xmlWrite(self, xmlWriter, name, value):
if isinstance(value, list):
xmlWriter.begintag(name)
xmlWriter.newline()
xmlWriter.indent()
blendValue = " ".join([str(val) for val in value])
xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
xmlWriter.newline()
xmlWriter.dedent()
xmlWriter.endtag(name)
xmlWriter.newline()
else:
xmlWriter.simpletag(name, value=value)
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
valueString = attrs.get("value", None)
if valueString is None:
value = parseBlendList(content)
else:
value = parseNum(attrs["value"])
return value
class ArrayConverter(SimpleConverter):
def xmlWrite(self, xmlWriter, name, value):
if value and isinstance(value[0], list):
xmlWriter.begintag(name)
xmlWriter.newline()
xmlWriter.indent()
for valueList in value:
blendValue = " ".join([str(val) for val in valueList])
xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
xmlWriter.newline()
xmlWriter.dedent()
xmlWriter.endtag(name)
xmlWriter.newline()
else:
value = " ".join([str(val) for val in value])
xmlWriter.simpletag(name, value=value)
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
valueString = attrs.get("value", None)
if valueString is None:
valueList = parseBlendList(content)
else:
values = valueString.split()
valueList = [parseNum(value) for value in values]
return valueList
class TableConverter(SimpleConverter):
def xmlWrite(self, xmlWriter, name, value):
xmlWriter.begintag(name)
xmlWriter.newline()
value.toXML(xmlWriter)
xmlWriter.endtag(name)
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
ob = self.getClass()()
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
ob.fromXML(name, attrs, content)
return ob
class PrivateDictConverter(TableConverter):
def getClass(self):
return PrivateDict
def _read(self, parent, value):
size, offset = value
file = parent.file
isCFF2 = parent._isCFF2
try:
vstore = parent.vstore
except AttributeError:
vstore = None
priv = PrivateDict(parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore)
file.seek(offset)
data = file.read(size)
assert len(data) == size
priv.decompile(data)
return priv
def write(self, parent, value):
return (0, 0) # dummy value
class SubrsConverter(TableConverter):
def getClass(self):
return SubrsIndex
def _read(self, parent, value):
file = parent.file
isCFF2 = parent._isCFF2
file.seek(parent.offset + value) # Offset(self)
return SubrsIndex(file, isCFF2=isCFF2)
def write(self, parent, value):
return 0 # dummy value
class CharStringsConverter(TableConverter):
def _read(self, parent, value):
file = parent.file
isCFF2 = parent._isCFF2
charset = parent.charset
varStore = getattr(parent, "VarStore", None)
globalSubrs = parent.GlobalSubrs
if hasattr(parent, "FDArray"):
fdArray = parent.FDArray
if hasattr(parent, "FDSelect"):
fdSelect = parent.FDSelect
else:
fdSelect = None
private = None
else:
fdSelect, fdArray = None, None
private = parent.Private
file.seek(value) # Offset(0)
charStrings = CharStrings(
file,
charset,
globalSubrs,
private,
fdSelect,
fdArray,
isCFF2=isCFF2,
varStore=varStore,
)
return charStrings
def write(self, parent, value):
return 0 # dummy value
def xmlRead(self, name, attrs, content, parent):
if hasattr(parent, "FDArray"):
# if it is a CID-keyed font, then the private Dict is extracted from the
# parent.FDArray
fdArray = parent.FDArray
if hasattr(parent, "FDSelect"):
fdSelect = parent.FDSelect
else:
fdSelect = None
private = None
else:
# if it is a name-keyed font, then the private dict is in the top dict,
# and
# there is no fdArray.
private, fdSelect, fdArray = parent.Private, None, None
charStrings = CharStrings(
None,
None,
parent.GlobalSubrs,
private,
fdSelect,
fdArray,
varStore=getattr(parent, "VarStore", None),
)
charStrings.fromXML(name, attrs, content)
return charStrings
class CharsetConverter(SimpleConverter):
def _read(self, parent, value):
isCID = hasattr(parent, "ROS")
if value > 2:
numGlyphs = parent.numGlyphs
file = parent.file
file.seek(value)
log.log(DEBUG, "loading charset at %s", value)
format = readCard8(file)
if format == 0:
charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
elif format == 1 or format == 2:
charset = parseCharset(numGlyphs, file, parent.strings, isCID, format)
else:
raise NotImplementedError
assert len(charset) == numGlyphs
log.log(DEBUG, " charset end at %s", file.tell())
# make sure glyph names are unique
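# (e.g. a charset of ["A", "A", "A"] is renamed to ["A", "A#1", "A#2"])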
allNames = {}
newCharset = []
for glyphName in charset:
if glyphName in allNames:
# make up a new glyphName that's unique
n = allNames[glyphName]
while (glyphName + "#" + str(n)) in allNames:
n += 1
allNames[glyphName] = n + 1
glyphName = glyphName + "#" + str(n)
allNames[glyphName] = 1
newCharset.append(glyphName)
charset = newCharset
else: # offset == 0 -> no charset data.
if isCID or "CharStrings" not in parent.rawDict:
# We get here only when processing fontDicts from the FDArray of
# CFF-CID fonts. Only the real topDict references the charset.
assert value == 0
charset = None
elif value == 0:
charset = cffISOAdobeStrings
elif value == 1:
charset = cffIExpertStrings
elif value == 2:
charset = cffExpertSubsetStrings
if charset and (len(charset) != parent.numGlyphs):
charset = charset[: parent.numGlyphs]
return charset
def write(self, parent, value):
return 0 # dummy value
def xmlWrite(self, xmlWriter, name, value):
# XXX only write charset when not in OT/TTX context, where we
# dump charset as a separate "GlyphOrder" table.
# # xmlWriter.simpletag("charset")
xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
pass
class CharsetCompiler(object):
def __init__(self, strings, charset, parent):
assert charset[0] == ".notdef"
isCID = hasattr(parent.dictObj, "ROS")
data0 = packCharset0(charset, isCID, strings)
data = packCharset(charset, isCID, strings)
if len(data) < len(data0):
self.data = data
else:
self.data = data0
self.parent = parent
def setPos(self, pos, endPos):
self.parent.rawDict["charset"] = pos
def getDataLength(self):
return len(self.data)
def toFile(self, file):
file.write(self.data)
def getStdCharSet(charset):
# check to see if we can use a predefined charset value.
predefinedCharSetVal = None
predefinedCharSets = [
(cffISOAdobeStringCount, cffISOAdobeStrings, 0),
(cffExpertStringCount, cffIExpertStrings, 1),
(cffExpertSubsetStringCount, cffExpertSubsetStrings, 2),
]
lcs = len(charset)
for cnt, pcs, csv in predefinedCharSets:
if predefinedCharSetVal is not None:
break
if lcs > cnt:
continue
predefinedCharSetVal = csv
for i in range(lcs):
if charset[i] != pcs[i]:
predefinedCharSetVal = None
break
return predefinedCharSetVal
def getCIDfromName(name, strings):
return int(name[3:])
def getSIDfromName(name, strings):
return strings.getSID(name)
def packCharset0(charset, isCID, strings):
fmt = 0
data = [packCard8(fmt)]
if isCID:
getNameID = getCIDfromName
else:
getNameID = getSIDfromName
for name in charset[1:]:
data.append(packCard16(getNameID(name, strings)))
return bytesjoin(data)
def packCharset(charset, isCID, strings):
fmt = 1
ranges = []
first = None
end = 0
if isCID:
getNameID = getCIDfromName
else:
getNameID = getSIDfromName
for name in charset[1:]:
SID = getNameID(name, strings)
if first is None:
first = SID
elif end + 1 != SID:
nLeft = end - first
if nLeft > 255:
fmt = 2
ranges.append((first, nLeft))
first = SID
end = SID
if end:
nLeft = end - first
if nLeft > 255:
fmt = 2
ranges.append((first, nLeft))
data = [packCard8(fmt)]
if fmt == 1:
nLeftFunc = packCard8
else:
nLeftFunc = packCard16
for first, nLeft in ranges:
data.append(packCard16(first) + nLeftFunc(nLeft))
return bytesjoin(data)
def parseCharset0(numGlyphs, file, strings, isCID):
charset = [".notdef"]
if isCID:
for i in range(numGlyphs - 1):
CID = readCard16(file)
charset.append("cid" + str(CID).zfill(5))
else:
for i in range(numGlyphs - 1):
SID = readCard16(file)
charset.append(strings[SID])
return charset
def parseCharset(numGlyphs, file, strings, isCID, fmt):
charset = [".notdef"]
count = 1
if fmt == 1:
nLeftFunc = readCard8
else:
nLeftFunc = readCard16
while count < numGlyphs:
first = readCard16(file)
nLeft = nLeftFunc(file)
if isCID:
for CID in range(first, first + nLeft + 1):
charset.append("cid" + str(CID).zfill(5))
else:
for SID in range(first, first + nLeft + 1):
charset.append(strings[SID])
count = count + nLeft + 1
return charset
class EncodingCompiler(object):
def __init__(self, strings, encoding, parent):
assert not isinstance(encoding, str)
data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings)
data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings)
if len(data0) < len(data1):
self.data = data0
else:
self.data = data1
self.parent = parent
def setPos(self, pos, endPos):
self.parent.rawDict["Encoding"] = pos
def getDataLength(self):
return len(self.data)
def toFile(self, file):
file.write(self.data)
class EncodingConverter(SimpleConverter):
def _read(self, parent, value):
if value == 0:
return "StandardEncoding"
elif value == 1:
return "ExpertEncoding"
else:
assert value > 1
file = parent.file
file.seek(value)
log.log(DEBUG, "loading Encoding at %s", value)
fmt = readCard8(file)
haveSupplement = fmt & 0x80
if haveSupplement:
raise NotImplementedError("Encoding supplements are not yet supported")
fmt = fmt & 0x7F
if fmt == 0:
encoding = parseEncoding0(
parent.charset, file, haveSupplement, parent.strings
)
elif fmt == 1:
encoding = parseEncoding1(
parent.charset, file, haveSupplement, parent.strings
)
return encoding
def write(self, parent, value):
if value == "StandardEncoding":
return 0
elif value == "ExpertEncoding":
return 1
return 0 # dummy value
def xmlWrite(self, xmlWriter, name, value):
if value in ("StandardEncoding", "ExpertEncoding"):
xmlWriter.simpletag(name, name=value)
xmlWriter.newline()
return
xmlWriter.begintag(name)
xmlWriter.newline()
for code in range(len(value)):
glyphName = value[code]
if glyphName != ".notdef":
xmlWriter.simpletag("map", code=hex(code), name=glyphName)
xmlWriter.newline()
xmlWriter.endtag(name)
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
if "name" in attrs:
return attrs["name"]
encoding = [".notdef"] * 256
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
code = safeEval(attrs["code"])
glyphName = attrs["name"]
encoding[code] = glyphName
return encoding
def parseEncoding0(charset, file, haveSupplement, strings):
nCodes = readCard8(file)
encoding = [".notdef"] * 256
for glyphID in range(1, nCodes + 1):
code = readCard8(file)
if code != 0:
encoding[code] = charset[glyphID]
return encoding
def parseEncoding1(charset, file, haveSupplement, strings):
nRanges = readCard8(file)
encoding = [".notdef"] * 256
glyphID = 1
for i in range(nRanges):
code = readCard8(file)
nLeft = readCard8(file)
for glyphID in range(glyphID, glyphID + nLeft + 1):
encoding[code] = charset[glyphID]
code = code + 1
glyphID = glyphID + 1
return encoding
def packEncoding0(charset, encoding, strings):
fmt = 0
m = {}
for code in range(len(encoding)):
name = encoding[code]
if name != ".notdef":
m[name] = code
codes = []
for name in charset[1:]:
code = m.get(name)
codes.append(code)
while codes and codes[-1] is None:
codes.pop()
data = [packCard8(fmt), packCard8(len(codes))]
for code in codes:
if code is None:
code = 0
data.append(packCard8(code))
return bytesjoin(data)
def packEncoding1(charset, encoding, strings):
fmt = 1
m = {}
for code in range(len(encoding)):
name = encoding[code]
if name != ".notdef":
m[name] = code
ranges = []
first = None
end = 0
for name in charset[1:]:
code = m.get(name, -1)
if first is None:
first = code
elif end + 1 != code:
nLeft = end - first
ranges.append((first, nLeft))
first = code
end = code
nLeft = end - first
ranges.append((first, nLeft))
# remove unencoded glyphs at the end.
while ranges and ranges[-1][0] == -1:
ranges.pop()
data = [packCard8(fmt), packCard8(len(ranges))]
for first, nLeft in ranges:
if first == -1: # unencoded
first = 0
data.append(packCard8(first) + packCard8(nLeft))
return bytesjoin(data)
class FDArrayConverter(TableConverter):
def _read(self, parent, value):
try:
vstore = parent.VarStore
except AttributeError:
vstore = None
file = parent.file
isCFF2 = parent._isCFF2
file.seek(value)
fdArray = FDArrayIndex(file, isCFF2=isCFF2)
fdArray.vstore = vstore
fdArray.strings = parent.strings
fdArray.GlobalSubrs = parent.GlobalSubrs
return fdArray
def write(self, parent, value):
return 0 # dummy value
def xmlRead(self, name, attrs, content, parent):
fdArray = FDArrayIndex()
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
fdArray.fromXML(name, attrs, content)
return fdArray
class FDSelectConverter(SimpleConverter):
def _read(self, parent, value):
file = parent.file
file.seek(value)
fdSelect = FDSelect(file, parent.numGlyphs)
return fdSelect
def write(self, parent, value):
return 0 # dummy value
# The FDSelect glyph data is written out to XML in the charstring keys,
# so we write out only the format selector
def xmlWrite(self, xmlWriter, name, value):
xmlWriter.simpletag(name, [("format", value.format)])
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
fmt = safeEval(attrs["format"])
file = None
numGlyphs = None
fdSelect = FDSelect(file, numGlyphs, fmt)
return fdSelect
class VarStoreConverter(SimpleConverter):
def _read(self, parent, value):
file = parent.file
file.seek(value)
varStore = VarStoreData(file)
varStore.decompile()
return varStore
def write(self, parent, value):
return 0 # dummy value
def xmlWrite(self, xmlWriter, name, value):
value.writeXML(xmlWriter, name)
def xmlRead(self, name, attrs, content, parent):
varStore = VarStoreData()
varStore.xmlRead(name, attrs, content, parent)
return varStore
def packFDSelect0(fdSelectArray):
fmt = 0
data = [packCard8(fmt)]
for index in fdSelectArray:
data.append(packCard8(index))
return bytesjoin(data)
def packFDSelect3(fdSelectArray):
fmt = 3
fdRanges = []
lenArray = len(fdSelectArray)
lastFDIndex = -1
for i in range(lenArray):
fdIndex = fdSelectArray[i]
if lastFDIndex != fdIndex:
fdRanges.append([i, fdIndex])
lastFDIndex = fdIndex
sentinelGID = i + 1
data = [packCard8(fmt)]
data.append(packCard16(len(fdRanges)))
for fdRange in fdRanges:
data.append(packCard16(fdRange[0]))
data.append(packCard8(fdRange[1]))
data.append(packCard16(sentinelGID))
return bytesjoin(data)
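# e.g. an fdSelectArray of [0, 0, 0, 1, 1, 2] yields fdRanges
# [[0, 0], [3, 1], [5, 2]] and a sentinel GID of 6.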
def packFDSelect4(fdSelectArray):
fmt = 4
fdRanges = []
lenArray = len(fdSelectArray)
lastFDIndex = -1
for i in range(lenArray):
fdIndex = fdSelectArray[i]
if lastFDIndex != fdIndex:
fdRanges.append([i, fdIndex])
lastFDIndex = fdIndex
sentinelGID = i + 1
data = [packCard8(fmt)]
data.append(packCard32(len(fdRanges)))
for fdRange in fdRanges:
data.append(packCard32(fdRange[0]))
data.append(packCard16(fdRange[1]))
data.append(packCard32(sentinelGID))
return bytesjoin(data)
class FDSelectCompiler(object):
def __init__(self, fdSelect, parent):
fmt = fdSelect.format
fdSelectArray = fdSelect.gidArray
if fmt == 0:
self.data = packFDSelect0(fdSelectArray)
elif fmt == 3:
self.data = packFDSelect3(fdSelectArray)
elif fmt == 4:
self.data = packFDSelect4(fdSelectArray)
else:
# choose smaller of the two formats
data0 = packFDSelect0(fdSelectArray)
data3 = packFDSelect3(fdSelectArray)
if len(data0) < len(data3):
self.data = data0
fdSelect.format = 0
else:
self.data = data3
fdSelect.format = 3
self.parent = parent
def setPos(self, pos, endPos):
self.parent.rawDict["FDSelect"] = pos
def getDataLength(self):
return len(self.data)
def toFile(self, file):
file.write(self.data)
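# Size comparison behind the "choose smaller" branch above: format 0 costs
# 1 + numGlyphs bytes (one Card8 per glyph), while format 3 costs
# 5 + 3 * numRanges bytes. For example, a hypothetical 1000-glyph font whose
# FD indices form only 4 runs packs to 1001 bytes in format 0 but only
# 17 bytes in format 3.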
class VarStoreCompiler(object):
def __init__(self, varStoreData, parent):
self.parent = parent
if not varStoreData.data:
varStoreData.compile()
data = [packCard16(len(varStoreData.data)), varStoreData.data]
self.data = bytesjoin(data)
def setPos(self, pos, endPos):
self.parent.rawDict["VarStore"] = pos
def getDataLength(self):
return len(self.data)
def toFile(self, file):
file.write(self.data)
class ROSConverter(SimpleConverter):
def xmlWrite(self, xmlWriter, name, value):
registry, order, supplement = value
xmlWriter.simpletag(
name,
[
("Registry", tostr(registry)),
("Order", tostr(order)),
("Supplement", supplement),
],
)
xmlWriter.newline()
def xmlRead(self, name, attrs, content, parent):
return (attrs["Registry"], attrs["Order"], safeEval(attrs["Supplement"]))
topDictOperators = [
# opcode name argument type default converter
(25, "maxstack", "number", None, None),
((12, 30), "ROS", ("SID", "SID", "number"), None, ROSConverter()),
((12, 20), "SyntheticBase", "number", None, None),
(0, "version", "SID", None, None),
(1, "Notice", "SID", None, Latin1Converter()),
((12, 0), "Copyright", "SID", None, Latin1Converter()),
(2, "FullName", "SID", None, Latin1Converter()),
((12, 38), "FontName", "SID", None, Latin1Converter()),
(3, "FamilyName", "SID", None, Latin1Converter()),
(4, "Weight", "SID", None, None),
((12, 1), "isFixedPitch", "number", 0, None),
((12, 2), "ItalicAngle", "number", 0, None),
((12, 3), "UnderlinePosition", "number", -100, None),
((12, 4), "UnderlineThickness", "number", 50, None),
((12, 5), "PaintType", "number", 0, None),
((12, 6), "CharstringType", "number", 2, None),
((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None),
(13, "UniqueID", "number", None, None),
(5, "FontBBox", "array", [0, 0, 0, 0], None),
((12, 8), "StrokeWidth", "number", 0, None),
(14, "XUID", "array", None, None),
((12, 21), "PostScript", "SID", None, None),
((12, 22), "BaseFontName", "SID", None, None),
((12, 23), "BaseFontBlend", "delta", None, None),
((12, 31), "CIDFontVersion", "number", 0, None),
((12, 32), "CIDFontRevision", "number", 0, None),
((12, 33), "CIDFontType", "number", 0, None),
((12, 34), "CIDCount", "number", 8720, None),
(15, "charset", "number", None, CharsetConverter()),
((12, 35), "UIDBase", "number", None, None),
(16, "Encoding", "number", 0, EncodingConverter()),
(18, "Private", ("number", "number"), None, PrivateDictConverter()),
((12, 37), "FDSelect", "number", None, FDSelectConverter()),
((12, 36), "FDArray", "number", None, FDArrayConverter()),
(17, "CharStrings", "number", None, CharStringsConverter()),
(24, "VarStore", "number", None, VarStoreConverter()),
]
topDictOperators2 = [
# opcode name argument type default converter
(25, "maxstack", "number", None, None),
((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None),
((12, 37), "FDSelect", "number", None, FDSelectConverter()),
((12, 36), "FDArray", "number", None, FDArrayConverter()),
(17, "CharStrings", "number", None, CharStringsConverter()),
(24, "VarStore", "number", None, VarStoreConverter()),
]
# Note! FDSelect and FDArray must both precede CharStrings in the output XML build order,
# in order for the font to compile back from XML.
kBlendDictOpName = "blend"
blendOp = 23
privateDictOperators = [
# opcode name argument type default converter
(22, "vsindex", "number", None, None),
(
blendOp,
kBlendDictOpName,
"blendList",
None,
None,
), # This is for reading to/from XML: it is not written to CFF.
(6, "BlueValues", "delta", None, None),
(7, "OtherBlues", "delta", None, None),
(8, "FamilyBlues", "delta", None, None),
(9, "FamilyOtherBlues", "delta", None, None),
((12, 9), "BlueScale", "number", 0.039625, None),
((12, 10), "BlueShift", "number", 7, None),
((12, 11), "BlueFuzz", "number", 1, None),
(10, "StdHW", "number", None, None),
(11, "StdVW", "number", None, None),
((12, 12), "StemSnapH", "delta", None, None),
((12, 13), "StemSnapV", "delta", None, None),
((12, 14), "ForceBold", "number", 0, None),
((12, 15), "ForceBoldThreshold", "number", None, None), # deprecated
((12, 16), "lenIV", "number", None, None), # deprecated
((12, 17), "LanguageGroup", "number", 0, None),
((12, 18), "ExpansionFactor", "number", 0.06, None),
((12, 19), "initialRandomSeed", "number", 0, None),
(20, "defaultWidthX", "number", 0, None),
(21, "nominalWidthX", "number", 0, None),
(19, "Subrs", "number", None, SubrsConverter()),
]
privateDictOperators2 = [
# opcode name argument type default converter
(22, "vsindex", "number", None, None),
(
blendOp,
kBlendDictOpName,
"blendList",
None,
None,
), # This is for reading to/from XML: it is not written to CFF.
(6, "BlueValues", "delta", None, None),
(7, "OtherBlues", "delta", None, None),
(8, "FamilyBlues", "delta", None, None),
(9, "FamilyOtherBlues", "delta", None, None),
((12, 9), "BlueScale", "number", 0.039625, None),
((12, 10), "BlueShift", "number", 7, None),
((12, 11), "BlueFuzz", "number", 1, None),
(10, "StdHW", "number", None, None),
(11, "StdVW", "number", None, None),
((12, 12), "StemSnapH", "delta", None, None),
((12, 13), "StemSnapV", "delta", None, None),
((12, 17), "LanguageGroup", "number", 0, None),
((12, 18), "ExpansionFactor", "number", 0.06, None),
(19, "Subrs", "number", None, SubrsConverter()),
]
def addConverters(table):
for i in range(len(table)):
op, name, arg, default, conv = table[i]
if conv is not None:
continue
if arg in ("delta", "array"):
conv = ArrayConverter()
elif arg == "number":
conv = NumberConverter()
elif arg == "SID":
conv = ASCIIConverter()
elif arg == "blendList":
conv = None
else:
assert False
table[i] = op, name, arg, default, conv
addConverters(privateDictOperators)
addConverters(topDictOperators)
class TopDictDecompiler(psCharStrings.DictDecompiler):
operators = buildOperatorDict(topDictOperators)
class PrivateDictDecompiler(psCharStrings.DictDecompiler):
operators = buildOperatorDict(privateDictOperators)
class DictCompiler(object):
maxBlendStack = 0
def __init__(self, dictObj, strings, parent, isCFF2=None):
if strings:
assert isinstance(strings, IndexedStrings)
if isCFF2 is None and hasattr(parent, "isCFF2"):
isCFF2 = parent.isCFF2
assert isCFF2 is not None
self.isCFF2 = isCFF2
self.dictObj = dictObj
self.strings = strings
self.parent = parent
rawDict = {}
for name in dictObj.order:
value = getattr(dictObj, name, None)
if value is None:
continue
conv = dictObj.converters[name]
value = conv.write(dictObj, value)
if value == dictObj.defaults.get(name):
continue
rawDict[name] = value
self.rawDict = rawDict
def setPos(self, pos, endPos):
pass
def getDataLength(self):
return len(self.compile("getDataLength"))
def compile(self, reason):
log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason)
rawDict = self.rawDict
data = []
for name in self.dictObj.order:
value = rawDict.get(name)
if value is None:
continue
op, argType = self.opcodes[name]
if isinstance(argType, tuple):
l = len(argType)
assert len(value) == l, "value doesn't match arg type"
for i in range(l):
arg = argType[i]
v = value[i]
arghandler = getattr(self, "arg_" + arg)
data.append(arghandler(v))
else:
arghandler = getattr(self, "arg_" + argType)
data.append(arghandler(value))
data.append(op)
data = bytesjoin(data)
return data
def toFile(self, file):
data = self.compile("toFile")
file.write(data)
def arg_number(self, num):
if isinstance(num, list):
data = [encodeNumber(val) for val in num]
data.append(encodeNumber(1))
data.append(bytechr(blendOp))
datum = bytesjoin(data)
else:
datum = encodeNumber(num)
return datum
def arg_SID(self, s):
return psCharStrings.encodeIntCFF(self.strings.getSID(s))
def arg_array(self, value):
data = []
for num in value:
data.append(self.arg_number(num))
return bytesjoin(data)
def arg_delta(self, value):
if not value:
return b""
val0 = value[0]
if isinstance(val0, list):
data = self.arg_delta_blend(value)
else:
out = []
last = 0
for v in value:
out.append(v - last)
last = v
data = []
for num in out:
data.append(encodeNumber(num))
return bytesjoin(data)
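# Illustrative sketch of the delta encoding above (hypothetical BlueValues,
# not from a real font): the absolute list [-15, 0, 515, 515, 666, 666] is
# written as the successive differences [-15, 15, 515, 0, 151, 0], each
# encoded with encodeNumber().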
def arg_delta_blend(self, value):
"""A delta list with blend lists has to be *all* blend lists.
The value is a list arranged as follows::
[
[V0, d0..dn]
[V1, d0..dn]
...
[Vm, d0..dn]
]
``V`` is the absolute coordinate value from the default font, and ``d0-dn``
are the delta values from the *n* regions.
We want to return a list::
[
[v0, v1..vm]
[d0..dn]
...
[d0..dn]
numBlends
blendOp
]
where each ``v`` is relative to the previous default font value.
"""
numMasters = len(value[0])
numBlends = len(value)
numStack = (numBlends * numMasters) + 1
if numStack > self.maxBlendStack:
# Figure out the max number of values we can blend
# and divide this list up into chunks of that size.
numBlendValues = int((self.maxBlendStack - 1) / numMasters)
out = []
while True:
numVal = min(len(value), numBlendValues)
if numVal == 0:
break
valList = value[0:numVal]
out1 = self.arg_delta_blend(valList)
out.extend(out1)
value = value[numVal:]
else:
firstList = [0] * numBlends
deltaList = [None] * numBlends
i = 0
prevVal = 0
while i < numBlends:
# For PrivateDict BlueValues, the default font
# values are absolute, not relative.
# Must convert these back to relative coordinates
# before writing to CFF2.
defaultValue = value[i][0]
firstList[i] = defaultValue - prevVal
prevVal = defaultValue
deltaList[i] = value[i][1:]
i += 1
relValueList = firstList
for blendList in deltaList:
relValueList.extend(blendList)
out = [encodeNumber(val) for val in relValueList]
out.append(encodeNumber(numBlends))
out.append(bytechr(blendOp))
return out
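# Illustrative sketch of arg_delta_blend (hypothetical values, not from a real
# font): with two blend lists [[10, 1, 2], [20, 3, 4]] (absolute defaults 10
# and 20, two region deltas each), the defaults become relative (10, then
# 20 - 10 = 10) and the encoded operand sequence is 10 10 1 2 3 4 2 followed
# by the blend operator.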
def encodeNumber(num):
if isinstance(num, float):
return psCharStrings.encodeFloat(num)
else:
return psCharStrings.encodeIntCFF(num)
class TopDictCompiler(DictCompiler):
opcodes = buildOpcodeDict(topDictOperators)
def getChildren(self, strings):
isCFF2 = self.isCFF2
children = []
if self.dictObj.cff2GetGlyphOrder is None:
if hasattr(self.dictObj, "charset") and self.dictObj.charset:
if hasattr(self.dictObj, "ROS"): # aka isCID
charsetCode = None
else:
charsetCode = getStdCharSet(self.dictObj.charset)
if charsetCode is None:
children.append(
CharsetCompiler(strings, self.dictObj.charset, self)
)
else:
self.rawDict["charset"] = charsetCode
if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding:
encoding = self.dictObj.Encoding
if not isinstance(encoding, str):
children.append(EncodingCompiler(strings, encoding, self))
else:
if hasattr(self.dictObj, "VarStore"):
varStoreData = self.dictObj.VarStore
varStoreComp = VarStoreCompiler(varStoreData, self)
children.append(varStoreComp)
if hasattr(self.dictObj, "FDSelect"):
# I have not yet supported merging a ttx CFF-CID font, as there are
# interesting issues about merging the FDArrays. Here I assume that
# either the font was read from XML, and the FDSelect indices are all
# in the charstring data, or the FDSelect array is already fully defined.
fdSelect = self.dictObj.FDSelect
# probably read in from XML; assume fdIndex in CharString data
if len(fdSelect) == 0:
charStrings = self.dictObj.CharStrings
for name in self.dictObj.charset:
fdSelect.append(charStrings[name].fdSelectIndex)
fdSelectComp = FDSelectCompiler(fdSelect, self)
children.append(fdSelectComp)
if hasattr(self.dictObj, "CharStrings"):
items = []
charStrings = self.dictObj.CharStrings
for name in self.dictObj.charset:
items.append(charStrings[name])
charStringsComp = CharStringsCompiler(items, strings, self, isCFF2=isCFF2)
children.append(charStringsComp)
if hasattr(self.dictObj, "FDArray"):
# I have not yet supported merging a ttx CFF-CID font, as there are
# interesting issues about merging the FDArrays. Here I assume that the
# FDArray info is correct and complete.
fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
children.append(fdArrayIndexComp)
children.extend(fdArrayIndexComp.getChildren(strings))
if hasattr(self.dictObj, "Private"):
privComp = self.dictObj.Private.getCompiler(strings, self)
children.append(privComp)
children.extend(privComp.getChildren(strings))
return children
class FontDictCompiler(DictCompiler):
opcodes = buildOpcodeDict(topDictOperators)
def __init__(self, dictObj, strings, parent, isCFF2=None):
super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2)
#
# We now take some effort to detect if there were any key/value pairs
# supplied that were ignored in the FontDict context, and issue a warning
# for those cases.
#
ignoredNames = []
dictObj = self.dictObj
for name in sorted(set(dictObj.converters) - set(dictObj.order)):
if name in dictObj.rawDict:
# The font was directly read from binary. In this
# case, we want to report *all* "useless" key/value
# pairs that are in the font, not just the ones that
# are different from the default.
ignoredNames.append(name)
else:
# The font was probably read from a TTX file. We only
# warn about keys whose value is not the default. The
# ones that have the default value will not be written
# to binary anyway.
default = dictObj.defaults.get(name)
if default is not None:
conv = dictObj.converters[name]
default = conv.read(dictObj, default)
if getattr(dictObj, name, None) != default:
ignoredNames.append(name)
if ignoredNames:
log.warning(
"Some CFF FDArray/FontDict keys were ignored upon compile: "
+ " ".join(sorted(ignoredNames))
)
def getChildren(self, strings):
children = []
if hasattr(self.dictObj, "Private"):
privComp = self.dictObj.Private.getCompiler(strings, self)
children.append(privComp)
children.extend(privComp.getChildren(strings))
return children
class PrivateDictCompiler(DictCompiler):
maxBlendStack = maxStackLimit
opcodes = buildOpcodeDict(privateDictOperators)
def setPos(self, pos, endPos):
size = endPos - pos
self.parent.rawDict["Private"] = size, pos
self.pos = pos
def getChildren(self, strings):
children = []
if hasattr(self.dictObj, "Subrs"):
children.append(self.dictObj.Subrs.getCompiler(strings, self))
return children
class BaseDict(object):
def __init__(self, strings=None, file=None, offset=None, isCFF2=None):
assert (isCFF2 is None) == (file is None)
self.rawDict = {}
self.skipNames = []
self.strings = strings
if file is None:
return
self._isCFF2 = isCFF2
self.file = file
if offset is not None:
log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
self.offset = offset
def decompile(self, data):
log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data))
dec = self.decompilerClass(self.strings, self)
dec.decompile(data)
self.rawDict = dec.getDict()
self.postDecompile()
def postDecompile(self):
pass
def getCompiler(self, strings, parent, isCFF2=None):
return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
def __getattr__(self, name):
if name[:2] == name[-2:] == "__":
# to make deepcopy() and pickle.load() work, we need to signal with
# AttributeError that dunder methods like '__deepcopy__' or '__getstate__'
# aren't implemented. For more details, see:
# https://github.com/fonttools/fonttools/pull/1488
raise AttributeError(name)
value = self.rawDict.get(name, None)
if value is None:
value = self.defaults.get(name)
if value is None:
raise AttributeError(name)
conv = self.converters[name]
value = conv.read(self, value)
setattr(self, name, value)
return value
def toXML(self, xmlWriter):
for name in self.order:
if name in self.skipNames:
continue
value = getattr(self, name, None)
# XXX For "charset" we never skip calling xmlWrite even if the
# value is None, so we always write the following XML comment:
#
# <!-- charset is dumped separately as the 'GlyphOrder' element -->
#
# Charset is None when 'CFF ' table is imported from XML into an
# empty TTFont(). By writing this comment all the time, we obtain
# the same XML output whether roundtripping XML-to-XML or
# dumping binary-to-XML
if value is None and name != "charset":
continue
conv = self.converters[name]
conv.xmlWrite(xmlWriter, name, value)
ignoredNames = set(self.rawDict) - set(self.order)
if ignoredNames:
xmlWriter.comment(
"some keys were ignored: %s" % " ".join(sorted(ignoredNames))
)
xmlWriter.newline()
def fromXML(self, name, attrs, content):
conv = self.converters[name]
value = conv.xmlRead(name, attrs, content, self)
setattr(self, name, value)
class TopDict(BaseDict):
"""The ``TopDict`` represents the top-level dictionary holding font
information. CFF2 tables contain a restricted set of top-level entries
as described `here <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#7-top-dict-data>`_,
but CFF tables may contain a wider range of information. This information
can be accessed through attributes or through the dictionary returned
through the ``rawDict`` property:
.. code:: python
font = tt["CFF "].cff[0]
font.FamilyName
# 'Linux Libertine O'
font.rawDict["FamilyName"]
# 'Linux Libertine O'
More information is available in the CFF file's private dictionary, accessed
via the ``Private`` property:
.. code:: python
tt["CFF "].cff[0].Private.BlueValues
# [-15, 0, 515, 515, 666, 666]
"""
defaults = buildDefaults(topDictOperators)
converters = buildConverters(topDictOperators)
compilerClass = TopDictCompiler
order = buildOrder(topDictOperators)
decompilerClass = TopDictDecompiler
def __init__(
self,
strings=None,
file=None,
offset=None,
GlobalSubrs=None,
cff2GetGlyphOrder=None,
isCFF2=None,
):
super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
self.cff2GetGlyphOrder = cff2GetGlyphOrder
self.GlobalSubrs = GlobalSubrs
if isCFF2:
self.defaults = buildDefaults(topDictOperators2)
self.charset = cff2GetGlyphOrder()
self.order = buildOrder(topDictOperators2)
else:
self.defaults = buildDefaults(topDictOperators)
self.order = buildOrder(topDictOperators)
def getGlyphOrder(self):
"""Returns a list of glyph names in the CFF font."""
return self.charset
def postDecompile(self):
offset = self.rawDict.get("CharStrings")
if offset is None:
return
# get the number of glyphs beforehand.
self.file.seek(offset)
if self._isCFF2:
self.numGlyphs = readCard32(self.file)
else:
self.numGlyphs = readCard16(self.file)
def toXML(self, xmlWriter):
if hasattr(self, "CharStrings"):
self.decompileAllCharStrings()
if hasattr(self, "ROS"):
self.skipNames = ["Encoding"]
if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
# these values have default values, but I only want them to show up
# in CID fonts.
self.skipNames = [
"CIDFontVersion",
"CIDFontRevision",
"CIDFontType",
"CIDCount",
]
BaseDict.toXML(self, xmlWriter)
def decompileAllCharStrings(self):
# Make sure that all the Private Dicts have been instantiated.
for i, charString in enumerate(self.CharStrings.values()):
try:
charString.decompile()
except:
log.error("Error in charstring %s", i)
raise
def recalcFontBBox(self):
fontBBox = None
for charString in self.CharStrings.values():
bounds = charString.calcBounds(self.CharStrings)
if bounds is not None:
if fontBBox is not None:
fontBBox = unionRect(fontBBox, bounds)
else:
fontBBox = bounds
if fontBBox is None:
self.FontBBox = self.defaults["FontBBox"][:]
else:
self.FontBBox = list(intRect(fontBBox))
class FontDict(BaseDict):
#
# Since fonttools used to pass a lot of fields that are not relevant in the FDArray
# FontDict, there are 'ttx' files in the wild that contain all these. These got in
# the ttx files because fonttools writes explicit values for all the TopDict default
# values. These are not actually illegal in the context of an FDArray FontDict - you
# can legally, per spec, put any arbitrary key/value pair in a FontDict - but are
# useless since current major company CFF interpreters ignore anything but the set
# listed in this file. So, we just silently skip them. An exception is Weight: this
# is not used by any interpreter, but some foundries have asked that this be
# supported in FDArray FontDicts just to preserve information about the design when
# the font is being inspected.
#
# On top of that, there are fonts out there that contain such useless FontDict values.
#
# By subclassing TopDict, we *allow* all key/values from TopDict, both when reading
# from binary or when reading from XML, but by overriding `order` with a limited
# list of names, we ensure that only the useful names ever get exported to XML and
# ever get compiled into the binary font.
#
# We override compilerClass so we can warn about "useless" key/value pairs, either
# from the original binary font or from TTX input.
#
# See:
# - https://github.com/fonttools/fonttools/issues/740
# - https://github.com/fonttools/fonttools/issues/601
# - https://github.com/adobe-type-tools/afdko/issues/137
#
defaults = {}
converters = buildConverters(topDictOperators)
compilerClass = FontDictCompiler
orderCFF = ["FontName", "FontMatrix", "Weight", "Private"]
orderCFF2 = ["Private"]
decompilerClass = TopDictDecompiler
def __init__(
self,
strings=None,
file=None,
offset=None,
GlobalSubrs=None,
isCFF2=None,
vstore=None,
):
super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
self.vstore = vstore
self.setCFF2(isCFF2)
def setCFF2(self, isCFF2):
# isCFF2 may be None.
if isCFF2:
self.order = self.orderCFF2
self._isCFF2 = True
else:
self.order = self.orderCFF
self._isCFF2 = False
class PrivateDict(BaseDict):
defaults = buildDefaults(privateDictOperators)
converters = buildConverters(privateDictOperators)
order = buildOrder(privateDictOperators)
decompilerClass = PrivateDictDecompiler
compilerClass = PrivateDictCompiler
def __init__(self, strings=None, file=None, offset=None, isCFF2=None, vstore=None):
super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
self.vstore = vstore
if isCFF2:
self.defaults = buildDefaults(privateDictOperators2)
self.order = buildOrder(privateDictOperators2)
# Provide dummy values. This avoids needing to provide
# an isCFF2 state in a lot of places.
self.nominalWidthX = self.defaultWidthX = None
else:
self.defaults = buildDefaults(privateDictOperators)
self.order = buildOrder(privateDictOperators)
@property
def in_cff2(self):
return self._isCFF2
def getNumRegions(self, vi=None): # called from misc/psCharStrings.py
# if getNumRegions is being called, we can assume that VarStore exists.
if vi is None:
if hasattr(self, "vsindex"):
vi = self.vsindex
else:
vi = 0
numRegions = self.vstore.getNumRegions(vi)
return numRegions
class IndexedStrings(object):
"""SID -> string mapping."""
def __init__(self, file=None):
if file is None:
strings = []
else:
strings = [tostr(s, encoding="latin1") for s in Index(file, isCFF2=False)]
self.strings = strings
def getCompiler(self):
return IndexedStringsCompiler(self, None, self, isCFF2=False)
def __len__(self):
return len(self.strings)
def __getitem__(self, SID):
if SID < cffStandardStringCount:
return cffStandardStrings[SID]
else:
return self.strings[SID - cffStandardStringCount]
def getSID(self, s):
if not hasattr(self, "stringMapping"):
self.buildStringMapping()
s = tostr(s, encoding="latin1")
if s in cffStandardStringMapping:
SID = cffStandardStringMapping[s]
elif s in self.stringMapping:
SID = self.stringMapping[s]
else:
SID = len(self.strings) + cffStandardStringCount
self.strings.append(s)
self.stringMapping[s] = SID
return SID
def getStrings(self):
return self.strings
def buildStringMapping(self):
self.stringMapping = {}
for index in range(len(self.strings)):
self.stringMapping[self.strings[index]] = index + cffStandardStringCount
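# Illustrative sketch of the SID mapping (hypothetical glyph name, not from a
# real font): standard strings keep their fixed SIDs, while the first
# non-standard string is appended after them and gets SID 391
# (cffStandardStringCount):
#
#     strings = IndexedStrings()
#     strings.getSID("space")        # 1
#     strings.getSID("MyGlyph.alt")  # 391
#     strings[391]                   # 'MyGlyph.alt'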
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
cffStandardStrings = [
".notdef",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quoteright",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"quoteleft",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"exclamdown",
"cent",
"sterling",
"fraction",
"yen",
"florin",
"section",
"currency",
"quotesingle",
"quotedblleft",
"guillemotleft",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"endash",
"dagger",
"daggerdbl",
"periodcentered",
"paragraph",
"bullet",
"quotesinglbase",
"quotedblbase",
"quotedblright",
"guillemotright",
"ellipsis",
"perthousand",
"questiondown",
"grave",
"acute",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"dieresis",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
"emdash",
"AE",
"ordfeminine",
"Lslash",
"Oslash",
"OE",
"ordmasculine",
"ae",
"dotlessi",
"lslash",
"oslash",
"oe",
"germandbls",
"onesuperior",
"logicalnot",
"mu",
"trademark",
"Eth",
"onehalf",
"plusminus",
"Thorn",
"onequarter",
"divide",
"brokenbar",
"degree",
"thorn",
"threequarters",
"twosuperior",
"registered",
"minus",
"eth",
"multiply",
"threesuperior",
"copyright",
"Aacute",
"Acircumflex",
"Adieresis",
"Agrave",
"Aring",
"Atilde",
"Ccedilla",
"Eacute",
"Ecircumflex",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Ntilde",
"Oacute",
"Ocircumflex",
"Odieresis",
"Ograve",
"Otilde",
"Scaron",
"Uacute",
"Ucircumflex",
"Udieresis",
"Ugrave",
"Yacute",
"Ydieresis",
"Zcaron",
"aacute",
"acircumflex",
"adieresis",
"agrave",
"aring",
"atilde",
"ccedilla",
"eacute",
"ecircumflex",
"edieresis",
"egrave",
"iacute",
"icircumflex",
"idieresis",
"igrave",
"ntilde",
"oacute",
"ocircumflex",
"odieresis",
"ograve",
"otilde",
"scaron",
"uacute",
"ucircumflex",
"udieresis",
"ugrave",
"yacute",
"ydieresis",
"zcaron",
"exclamsmall",
"Hungarumlautsmall",
"dollaroldstyle",
"dollarsuperior",
"ampersandsmall",
"Acutesmall",
"parenleftsuperior",
"parenrightsuperior",
"twodotenleader",
"onedotenleader",
"zerooldstyle",
"oneoldstyle",
"twooldstyle",
"threeoldstyle",
"fouroldstyle",
"fiveoldstyle",
"sixoldstyle",
"sevenoldstyle",
"eightoldstyle",
"nineoldstyle",
"commasuperior",
"threequartersemdash",
"periodsuperior",
"questionsmall",
"asuperior",
"bsuperior",
"centsuperior",
"dsuperior",
"esuperior",
"isuperior",
"lsuperior",
"msuperior",
"nsuperior",
"osuperior",
"rsuperior",
"ssuperior",
"tsuperior",
"ff",
"ffi",
"ffl",
"parenleftinferior",
"parenrightinferior",
"Circumflexsmall",
"hyphensuperior",
"Gravesmall",
"Asmall",
"Bsmall",
"Csmall",
"Dsmall",
"Esmall",
"Fsmall",
"Gsmall",
"Hsmall",
"Ismall",
"Jsmall",
"Ksmall",
"Lsmall",
"Msmall",
"Nsmall",
"Osmall",
"Psmall",
"Qsmall",
"Rsmall",
"Ssmall",
"Tsmall",
"Usmall",
"Vsmall",
"Wsmall",
"Xsmall",
"Ysmall",
"Zsmall",
"colonmonetary",
"onefitted",
"rupiah",
"Tildesmall",
"exclamdownsmall",
"centoldstyle",
"Lslashsmall",
"Scaronsmall",
"Zcaronsmall",
"Dieresissmall",
"Brevesmall",
"Caronsmall",
"Dotaccentsmall",
"Macronsmall",
"figuredash",
"hypheninferior",
"Ogoneksmall",
"Ringsmall",
"Cedillasmall",
"questiondownsmall",
"oneeighth",
"threeeighths",
"fiveeighths",
"seveneighths",
"onethird",
"twothirds",
"zerosuperior",
"foursuperior",
"fivesuperior",
"sixsuperior",
"sevensuperior",
"eightsuperior",
"ninesuperior",
"zeroinferior",
"oneinferior",
"twoinferior",
"threeinferior",
"fourinferior",
"fiveinferior",
"sixinferior",
"seveninferior",
"eightinferior",
"nineinferior",
"centinferior",
"dollarinferior",
"periodinferior",
"commainferior",
"Agravesmall",
"Aacutesmall",
"Acircumflexsmall",
"Atildesmall",
"Adieresissmall",
"Aringsmall",
"AEsmall",
"Ccedillasmall",
"Egravesmall",
"Eacutesmall",
"Ecircumflexsmall",
"Edieresissmall",
"Igravesmall",
"Iacutesmall",
"Icircumflexsmall",
"Idieresissmall",
"Ethsmall",
"Ntildesmall",
"Ogravesmall",
"Oacutesmall",
"Ocircumflexsmall",
"Otildesmall",
"Odieresissmall",
"OEsmall",
"Oslashsmall",
"Ugravesmall",
"Uacutesmall",
"Ucircumflexsmall",
"Udieresissmall",
"Yacutesmall",
"Thornsmall",
"Ydieresissmall",
"001.000",
"001.001",
"001.002",
"001.003",
"Black",
"Bold",
"Book",
"Light",
"Medium",
"Regular",
"Roman",
"Semibold",
]
cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount
# build reverse mapping
cffStandardStringMapping = {}
for _i in range(cffStandardStringCount):
cffStandardStringMapping[cffStandardStrings[_i]] = _i
cffISOAdobeStrings = [
".notdef",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quoteright",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"quoteleft",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"exclamdown",
"cent",
"sterling",
"fraction",
"yen",
"florin",
"section",
"currency",
"quotesingle",
"quotedblleft",
"guillemotleft",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"endash",
"dagger",
"daggerdbl",
"periodcentered",
"paragraph",
"bullet",
"quotesinglbase",
"quotedblbase",
"quotedblright",
"guillemotright",
"ellipsis",
"perthousand",
"questiondown",
"grave",
"acute",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"dieresis",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
"emdash",
"AE",
"ordfeminine",
"Lslash",
"Oslash",
"OE",
"ordmasculine",
"ae",
"dotlessi",
"lslash",
"oslash",
"oe",
"germandbls",
"onesuperior",
"logicalnot",
"mu",
"trademark",
"Eth",
"onehalf",
"plusminus",
"Thorn",
"onequarter",
"divide",
"brokenbar",
"degree",
"thorn",
"threequarters",
"twosuperior",
"registered",
"minus",
"eth",
"multiply",
"threesuperior",
"copyright",
"Aacute",
"Acircumflex",
"Adieresis",
"Agrave",
"Aring",
"Atilde",
"Ccedilla",
"Eacute",
"Ecircumflex",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Ntilde",
"Oacute",
"Ocircumflex",
"Odieresis",
"Ograve",
"Otilde",
"Scaron",
"Uacute",
"Ucircumflex",
"Udieresis",
"Ugrave",
"Yacute",
"Ydieresis",
"Zcaron",
"aacute",
"acircumflex",
"adieresis",
"agrave",
"aring",
"atilde",
"ccedilla",
"eacute",
"ecircumflex",
"edieresis",
"egrave",
"iacute",
"icircumflex",
"idieresis",
"igrave",
"ntilde",
"oacute",
"ocircumflex",
"odieresis",
"ograve",
"otilde",
"scaron",
"uacute",
"ucircumflex",
"udieresis",
"ugrave",
"yacute",
"ydieresis",
"zcaron",
]
cffISOAdobeStringCount = 229
assert len(cffISOAdobeStrings) == cffISOAdobeStringCount
cffIExpertStrings = [
".notdef",
"space",
"exclamsmall",
"Hungarumlautsmall",
"dollaroldstyle",
"dollarsuperior",
"ampersandsmall",
"Acutesmall",
"parenleftsuperior",
"parenrightsuperior",
"twodotenleader",
"onedotenleader",
"comma",
"hyphen",
"period",
"fraction",
"zerooldstyle",
"oneoldstyle",
"twooldstyle",
"threeoldstyle",
"fouroldstyle",
"fiveoldstyle",
"sixoldstyle",
"sevenoldstyle",
"eightoldstyle",
"nineoldstyle",
"colon",
"semicolon",
"commasuperior",
"threequartersemdash",
"periodsuperior",
"questionsmall",
"asuperior",
"bsuperior",
"centsuperior",
"dsuperior",
"esuperior",
"isuperior",
"lsuperior",
"msuperior",
"nsuperior",
"osuperior",
"rsuperior",
"ssuperior",
"tsuperior",
"ff",
"fi",
"fl",
"ffi",
"ffl",
"parenleftinferior",
"parenrightinferior",
"Circumflexsmall",
"hyphensuperior",
"Gravesmall",
"Asmall",
"Bsmall",
"Csmall",
"Dsmall",
"Esmall",
"Fsmall",
"Gsmall",
"Hsmall",
"Ismall",
"Jsmall",
"Ksmall",
"Lsmall",
"Msmall",
"Nsmall",
"Osmall",
"Psmall",
"Qsmall",
"Rsmall",
"Ssmall",
"Tsmall",
"Usmall",
"Vsmall",
"Wsmall",
"Xsmall",
"Ysmall",
"Zsmall",
"colonmonetary",
"onefitted",
"rupiah",
"Tildesmall",
"exclamdownsmall",
"centoldstyle",
"Lslashsmall",
"Scaronsmall",
"Zcaronsmall",
"Dieresissmall",
"Brevesmall",
"Caronsmall",
"Dotaccentsmall",
"Macronsmall",
"figuredash",
"hypheninferior",
"Ogoneksmall",
"Ringsmall",
"Cedillasmall",
"onequarter",
"onehalf",
"threequarters",
"questiondownsmall",
"oneeighth",
"threeeighths",
"fiveeighths",
"seveneighths",
"onethird",
"twothirds",
"zerosuperior",
"onesuperior",
"twosuperior",
"threesuperior",
"foursuperior",
"fivesuperior",
"sixsuperior",
"sevensuperior",
"eightsuperior",
"ninesuperior",
"zeroinferior",
"oneinferior",
"twoinferior",
"threeinferior",
"fourinferior",
"fiveinferior",
"sixinferior",
"seveninferior",
"eightinferior",
"nineinferior",
"centinferior",
"dollarinferior",
"periodinferior",
"commainferior",
"Agravesmall",
"Aacutesmall",
"Acircumflexsmall",
"Atildesmall",
"Adieresissmall",
"Aringsmall",
"AEsmall",
"Ccedillasmall",
"Egravesmall",
"Eacutesmall",
"Ecircumflexsmall",
"Edieresissmall",
"Igravesmall",
"Iacutesmall",
"Icircumflexsmall",
"Idieresissmall",
"Ethsmall",
"Ntildesmall",
"Ogravesmall",
"Oacutesmall",
"Ocircumflexsmall",
"Otildesmall",
"Odieresissmall",
"OEsmall",
"Oslashsmall",
"Ugravesmall",
"Uacutesmall",
"Ucircumflexsmall",
"Udieresissmall",
"Yacutesmall",
"Thornsmall",
"Ydieresissmall",
]
cffExpertStringCount = 166
assert len(cffIExpertStrings) == cffExpertStringCount
cffExpertSubsetStrings = [
".notdef",
"space",
"dollaroldstyle",
"dollarsuperior",
"parenleftsuperior",
"parenrightsuperior",
"twodotenleader",
"onedotenleader",
"comma",
"hyphen",
"period",
"fraction",
"zerooldstyle",
"oneoldstyle",
"twooldstyle",
"threeoldstyle",
"fouroldstyle",
"fiveoldstyle",
"sixoldstyle",
"sevenoldstyle",
"eightoldstyle",
"nineoldstyle",
"colon",
"semicolon",
"commasuperior",
"threequartersemdash",
"periodsuperior",
"asuperior",
"bsuperior",
"centsuperior",
"dsuperior",
"esuperior",
"isuperior",
"lsuperior",
"msuperior",
"nsuperior",
"osuperior",
"rsuperior",
"ssuperior",
"tsuperior",
"ff",
"fi",
"fl",
"ffi",
"ffl",
"parenleftinferior",
"parenrightinferior",
"hyphensuperior",
"colonmonetary",
"onefitted",
"rupiah",
"centoldstyle",
"figuredash",
"hypheninferior",
"onequarter",
"onehalf",
"threequarters",
"oneeighth",
"threeeighths",
"fiveeighths",
"seveneighths",
"onethird",
"twothirds",
"zerosuperior",
"onesuperior",
"twosuperior",
"threesuperior",
"foursuperior",
"fivesuperior",
"sixsuperior",
"sevensuperior",
"eightsuperior",
"ninesuperior",
"zeroinferior",
"oneinferior",
"twoinferior",
"threeinferior",
"fourinferior",
"fiveinferior",
"sixinferior",
"seveninferior",
"eightinferior",
"nineinferior",
"centinferior",
"dollarinferior",
"periodinferior",
"commainferior",
]
cffExpertSubsetStringCount = 87
assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount
fontTools/cffLib/specializer.py
# -*- coding: utf-8 -*-
"""T2CharString operator specializer and generalizer.
PostScript glyph drawing operations can be expressed in multiple different
ways. For example, as well as the ``lineto`` operator, there is also a
``hlineto`` operator which draws a horizontal line, removing the need to
specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a
vertical line, removing the need to specify a ``dy`` coordinate. As well
as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects
into lists of operations, this module allows for conversion between general
and specific forms of the operation.
"""
from fontTools.cffLib import maxStackLimit
def stringToProgram(string):
if isinstance(string, str):
string = string.split()
program = []
for token in string:
try:
token = int(token)
except ValueError:
try:
token = float(token)
except ValueError:
pass
program.append(token)
return program
def programToString(program):
return " ".join(str(x) for x in program)
def programToCommands(program, getNumRegions=None):
"""Takes a T2CharString program list and returns list of commands.
Each command is a two-tuple of commandname, arg-list. The commandname may be
an empty string if no commandname shall be emitted (used for the glyph width,
the hintmask/cntrmask argument, as well as stray arguments at the end of the
program 🤷).
'getNumRegions' may be None, or a callable object. It must return the
number of regions. 'getNumRegions' takes a single argument, vsindex. If
the vsindex argument is None, getNumRegions returns the default number
of regions for the charstring, else it returns the numRegions for
the vsindex.
The Charstring may or may not start with a width value. If the first
non-blend operator has an odd number of arguments, then the first argument is
a width, and is popped off. This is complicated with blend operators, as
there may be more than one before the first hint or moveto operator, and each
one reduces several arguments to just one list argument. We have to sum the
number of arguments that are not part of the blend arguments, and all the
'numBlends' values. We could instead have said that by definition, if there
is a blend operator, there is no width value, since CFF2 Charstrings don't
have width values. I discussed this with Behdad, and we are allowing for an
initial width value in this case because developers may assemble a CFF2
charstring from CFF Charstrings, which could have width values.
"""
seenWidthOp = False
vsIndex = None
lenBlendStack = 0
lastBlendIndex = 0
commands = []
stack = []
it = iter(program)
for token in it:
if not isinstance(token, str):
stack.append(token)
continue
if token == "blend":
assert getNumRegions is not None
numSourceFonts = 1 + getNumRegions(vsIndex)
# replace the blend op args on the stack with a single list
# containing all the blend op args.
numBlends = stack[-1]
numBlendArgs = numBlends * numSourceFonts + 1
# replace first blend op by a list of the blend ops.
stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
lenBlendStack += numBlends + len(stack) - 1
lastBlendIndex = len(stack)
# if a blend op exists, this is or will be a CFF2 charstring.
continue
elif token == "vsindex":
vsIndex = stack[-1]
assert type(vsIndex) is int
elif (not seenWidthOp) and token in {
"hstem",
"hstemhm",
"vstem",
"vstemhm",
"cntrmask",
"hintmask",
"hmoveto",
"vmoveto",
"rmoveto",
"endchar",
}:
seenWidthOp = True
parity = token in {"hmoveto", "vmoveto"}
if lenBlendStack:
# lenBlendStack has the number of args represented by the last blend
# arg and all the preceding args. We need to now add the number of
# args following the last blend arg.
numArgs = lenBlendStack + len(stack[lastBlendIndex:])
else:
numArgs = len(stack)
if numArgs and (numArgs % 2) ^ parity:
width = stack.pop(0)
commands.append(("", [width]))
if token in {"hintmask", "cntrmask"}:
if stack:
commands.append(("", stack))
commands.append((token, []))
commands.append(("", [next(it)]))
else:
commands.append((token, stack))
stack = []
if stack:
commands.append(("", stack))
return commands
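# Illustrative sketch (hypothetical charstring, not from a real font): the
# program [500, 10, 20, 'rmoveto', 30, 'hlineto', 'endchar'] has an odd number
# of arguments before the first moveto, so 500 is popped off as the width:
#   [('', [500]), ('rmoveto', [10, 20]), ('hlineto', [30]), ('endchar', [])]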
def _flattenBlendArgs(args):
token_list = []
for arg in args:
if isinstance(arg, list):
token_list.extend(arg)
token_list.append("blend")
else:
token_list.append(arg)
return token_list
def commandsToProgram(commands):
"""Takes a commands list as returned by programToCommands() and converts
it back to a T2CharString program list."""
program = []
for op, args in commands:
if any(isinstance(arg, list) for arg in args):
args = _flattenBlendArgs(args)
program.extend(args)
if op:
program.append(op)
return program
def _everyN(el, n):
"""Group the list el into groups of size n"""
if len(el) % n != 0:
raise ValueError(el)
for i in range(0, len(el), n):
yield el[i : i + n]
class _GeneralizerDecombinerCommandsMap(object):
@staticmethod
def rmoveto(args):
if len(args) != 2:
raise ValueError(args)
yield ("rmoveto", args)
@staticmethod
def hmoveto(args):
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [args[0], 0])
@staticmethod
def vmoveto(args):
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [0, args[0]])
@staticmethod
def rlineto(args):
if not args:
raise ValueError(args)
for args in _everyN(args, 2):
yield ("rlineto", args)
@staticmethod
def hlineto(args):
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ("rlineto", [next(it), 0])
yield ("rlineto", [0, next(it)])
except StopIteration:
pass
@staticmethod
def vlineto(args):
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ("rlineto", [0, next(it)])
yield ("rlineto", [next(it), 0])
except StopIteration:
pass
@staticmethod
def rrcurveto(args):
if not args:
raise ValueError(args)
for args in _everyN(args, 6):
yield ("rrcurveto", args)
@staticmethod
def hhcurveto(args):
if len(args) < 4 or len(args) % 4 > 1:
raise ValueError(args)
if len(args) % 2 == 1:
yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
args = args[5:]
for args in _everyN(args, 4):
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])
@staticmethod
def vvcurveto(args):
if len(args) < 4 or len(args) % 4 > 1:
raise ValueError(args)
if len(args) % 2 == 1:
yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
args = args[5:]
for args in _everyN(args, 4):
yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])
@staticmethod
def hvcurveto(args):
if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if len(args) % 2 == 1:
lastStraight = len(args) % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
args = next(it)
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
else:
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
@staticmethod
def vhcurveto(args):
if len(args) < 4 or len(args) % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if len(args) % 2 == 1:
lastStraight = len(args) % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
args = next(it)
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
else:
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
@staticmethod
def rcurveline(args):
if len(args) < 8 or len(args) % 6 != 2:
raise ValueError(args)
args, last_args = args[:-2], args[-2:]
for args in _everyN(args, 6):
yield ("rrcurveto", args)
yield ("rlineto", last_args)
@staticmethod
def rlinecurve(args):
if len(args) < 8 or len(args) % 2 != 0:
raise ValueError(args)
args, last_args = args[:-6], args[-6:]
for args in _everyN(args, 2):
yield ("rlineto", args)
yield ("rrcurveto", last_args)
def _convertBlendOpToArgs(blendList):
# args is list of blend op args. Since we are supporting
# recursive blend op calls, some of these args may also
# be a list of blend op args, and need to be converted before
# we convert the current list.
if any([isinstance(arg, list) for arg in blendList]):
args = [
i
for e in blendList
for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
]
else:
args = blendList
# We now know that blendList contains a blend op argument list, even if
# some of the args are lists that each contain a blend op argument list.
# Convert from:
# [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
# to:
# [ [x0] + [delta tuple for x0],
# ...,
# [xn] + [delta tuple for xn] ]
numBlends = args[-1]
# Can't use args.pop() when the args are being used in a nested list
# comprehension. See calling context
args = args[:-1]
numRegions = len(args) // numBlends - 1
if not (numBlends * (numRegions + 1) == len(args)):
raise ValueError(blendList)
defaultArgs = [[arg] for arg in args[:numBlends]]
deltaArgs = args[numBlends:]
numDeltaValues = len(deltaArgs)
deltaList = [
deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
]
blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
return blend_args
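# Illustrative sketch (hypothetical operands): the flat blend argument list
# [10, 20, 1, 2, 3, 4, 2] (defaults 10 and 20, region deltas (1, 2) and
# (3, 4), numBlends 2) converts to the per-value lists
# [[10, 1, 2, 1], [20, 3, 4, 1]].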
def generalizeCommands(commands, ignoreErrors=False):
result = []
mapping = _GeneralizerDecombinerCommandsMap
for op, args in commands:
# First, generalize any blend args in the arg list.
if any([isinstance(arg, list) for arg in args]):
try:
args = [
n
for arg in args
for n in (
_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
)
]
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(("", args))
result.append(("", [op]))
else:
raise
func = getattr(mapping, op, None)
if not func:
result.append((op, args))
continue
try:
for command in func(args):
result.append(command)
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(("", args))
result.append(("", [op]))
else:
raise
return result
def generalizeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(
generalizeCommands(programToCommands(program, getNumRegions), **kwargs)
)
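# Illustrative sketch (hypothetical program): generalizeProgram([10, 'hlineto'])
# expands the specialized operator back into the generic form
# [10, 0, 'rlineto'].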
def _categorizeVector(v):
"""
Takes an X,Y vector v and returns one of 'r', 'h', 'v', or '0' depending on
which of X and/or Y are zero, plus a tuple of the nonzero components. If both
are zero, it still returns a single zero.
>>> _categorizeVector((0,0))
('0', (0,))
>>> _categorizeVector((1,0))
('h', (1,))
>>> _categorizeVector((0,2))
('v', (2,))
>>> _categorizeVector((1,2))
('r', (1, 2))
"""
if not v[0]:
if not v[1]:
return "0", v[:1]
else:
return "v", v[1:]
else:
if not v[1]:
return "h", v[:1]
else:
return "r", v
def _mergeCategories(a, b):
if a == "0":
return b
if b == "0":
return a
if a == b:
return a
return None
def _negateCategory(a):
if a == "h":
return "v"
if a == "v":
return "h"
assert a in "0r"
return a
def _convertToBlendCmds(args):
# return a list of blend commands, and
# the remaining non-blended args, if any.
num_args = len(args)
stack_use = 0
new_args = []
i = 0
while i < num_args:
arg = args[i]
if not isinstance(arg, list):
new_args.append(arg)
i += 1
stack_use += 1
else:
prev_stack_use = stack_use
# The arg is a tuple of blend values.
# These are each (master 0,delta 1..delta n, 1)
# Combine as many successive tuples as we can,
# up to the max stack limit.
num_sources = len(arg) - 1
blendlist = [arg]
i += 1
stack_use += 1 + num_sources # 1 for the num_blends arg
while (i < num_args) and isinstance(args[i], list):
blendlist.append(args[i])
i += 1
stack_use += num_sources
if stack_use + num_sources > maxStackLimit:
# if we are here, max stack is the CFF2 max stack.
# I use the CFF2 max stack limit here rather than
# the 'maxstack' chosen by the client, as the default
# maxstack may have been used unintentionally. For all
# the other operators, this just produces a little less
# optimization, but here it puts a hard (and low) limit
# on the number of source fonts that can be used.
break
# blendList now contains as many single blend tuples as can be
# combined without exceeding the CFF2 stack limit.
num_blends = len(blendlist)
# append the 'num_blends' default font values
blend_args = []
for arg in blendlist:
blend_args.append(arg[0])
for arg in blendlist:
assert arg[-1] == 1
blend_args.extend(arg[1:-1])
blend_args.append(num_blends)
new_args.append(blend_args)
stack_use = prev_stack_use + num_blends
return new_args
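# Illustrative sketch (hypothetical blend tuples): for
# args = [[10, 1, 2, 1], [20, 3, 4, 1]] the two tuples are combined into a
# single blend argument list [10, 20, 1, 2, 3, 4, 2], i.e. both defaults
# first, then all the deltas, then the blend count.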
def _addArgs(a, b):
if isinstance(b, list):
if isinstance(a, list):
if len(a) != len(b) or a[-1] != b[-1]:
raise ValueError()
return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]]
else:
a, b = b, a
if isinstance(a, list):
assert a[-1] == 1
return [_addArgs(a[0], b)] + a[1:]
return a + b
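# Illustrative sketch (hypothetical values): _addArgs(3, 4) is simply 7, while
# two blend lists are added element-wise, keeping the trailing blend count:
# _addArgs([10, 1, 2, 1], [5, 1, 1, 1]) == [15, 2, 3, 1].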
def specializeCommands(
commands,
ignoreErrors=False,
generalizeFirst=True,
preserveTopology=False,
maxstack=48,
):
# We perform several rounds of optimizations. They are carefully ordered and are:
#
# 0. Generalize commands.
# This ensures that they are in our expected simple form, with each line/curve only
# having arguments for one segment, and using the generic form (rlineto/rrcurveto).
# If caller is sure the input is in this form, they can turn off generalization to
# save time.
#
# 1. Combine successive rmoveto operations.
#
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
# We specialize into some, made-up, variants as well, which simplifies following
# passes.
#
# 3. Merge or delete redundant operations, to the extent requested.
# OpenType spec declares point numbers in CFF undefined. As such, we happily
# change topology. If client relies on point numbers (in GPOS anchors, or for
# hinting purposes(what?)) they can turn this off.
#
# 4. Peephole optimization to revert back some of the h/v variants back into their
# original "relative" operator (rline/rrcurveto) if that saves a byte.
#
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
#
# 6. Resolve any remaining made-up operators into real operators.
#
# I have convinced myself that this produces optimal bytecode (except for, possibly
# one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-)
# A dynamic-programming approach can do the same but would be significantly slower.
#
# 7. For any args which are blend lists, convert them to a blend command.
# 0. Generalize commands.
if generalizeFirst:
commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
else:
commands = list(commands) # Make copy since we modify in-place later.
# 1. Combine successive rmoveto operations.
for i in range(len(commands) - 1, 0, -1):
if "rmoveto" == commands[i][0] == commands[i - 1][0]:
v1, v2 = commands[i - 1][1], commands[i][1]
commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
del commands[i]
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
#
# We, in fact, specialize into more, made-up, variants that special-case when both
# X and Y components are zero. This simplifies the following optimization passes.
# This case is rare, but OCD does not let me skip it.
#
# After this round, we will have four variants that use the following mnemonics:
#
# - 'r' for relative, ie. non-zero X and non-zero Y,
# - 'h' for horizontal, ie. non-zero X and zero Y,
# - 'v' for vertical, ie. zero X and non-zero Y,
# - '0' for zeros, ie. zero X and zero Y.
#
# The '0' pseudo-operators are not part of the spec, but help simplify the following
# optimization rounds. We resolve them at the end. So, after this, we will have four
# moveto and four lineto variants:
#
# - 0moveto, 0lineto
# - hmoveto, hlineto
# - vmoveto, vlineto
# - rmoveto, rlineto
#
# and sixteen curveto variants. The first letter categorizes the curve's first
# delta vector (dx1,dy1) and the second letter its last delta vector (dx3,dy3).
# For example, a '0hcurveto' operator means a curve dx1,dy1,dx2,dy2,dx3,dy3 where
# dx1, dy1, and dy3 are zero but not dx3. An 'rvcurveto' means dx3 is zero but
# not dx1, dy1, or dy3.
#
# There are nine different variants of curves without the '0'. Those nine map exactly
# to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
# vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
# arguments and one without. Eg. an hhcurveto with an extra argument (odd number of
# arguments) is in fact an rhcurveto. The operators in the spec are designed such that
# all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
#
# Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest
# of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be
# thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always
# encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute
# the '0' with either 'h' or 'v' and it works.
#
# When we get to curve splines however, things become more complicated... XXX finish this.
# There's one more complexity with splines. If one side of the spline is not horizontal or
# vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
# Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
# only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
# This limits our merge opportunities later.
#
for i in range(len(commands)):
op, args = commands[i]
if op in {"rmoveto", "rlineto"}:
c, args = _categorizeVector(args)
commands[i] = c + op[1:], args
continue
if op == "rrcurveto":
c1, args1 = _categorizeVector(args[:2])
c2, args2 = _categorizeVector(args[-2:])
commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
continue
# 3. Merge or delete redundant operations, to the extent requested.
#
# TODO
# A 0moveto that comes before all other path operations can be removed.
# though I find conflicting evidence for this.
#
# TODO
# "If hstem and vstem hints are both declared at the beginning of a
# CharString, and this sequence is followed directly by the hintmask or
# cntrmask operators, then the vstem hint operator (or, if applicable,
# the vstemhm operator) need not be included."
#
# "The sequence and form of a CFF2 CharString program may be represented as:
# {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
#
# https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
#
# For Type2 CharStrings the sequence is:
# w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
# Some other redundancies change topology (point numbers).
if not preserveTopology:
for i in range(len(commands) - 1, -1, -1):
op, args = commands[i]
# A 00curveto is demoted to a (specialized) lineto.
if op == "00curveto":
assert len(args) == 4
c, args = _categorizeVector(args[1:3])
op = c + "lineto"
commands[i] = op, args
# and then...
# A 0lineto can be deleted.
if op == "0lineto":
del commands[i]
continue
# Merge adjacent hlineto's and vlineto's.
# In CFF2 charstrings from variable fonts, each
# arg item may be a list of blendable values, one from
# each source font.
if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
_, other_args = commands[i - 1]
assert len(args) == 1 and len(other_args) == 1
try:
new_args = [_addArgs(args[0], other_args[0])]
except ValueError:
continue
commands[i - 1] = (op, new_args)
del commands[i]
continue
# 4. Peephole optimization to revert back some of the h/v variants back into their
# original "relative" operator (rline/rrcurveto) if that saves a byte.
for i in range(1, len(commands) - 1):
op, args = commands[i]
prv, nxt = commands[i - 1][0], commands[i + 1][0]
if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
assert len(args) == 1
args = [0, args[0]] if op[0] == "v" else [args[0], 0]
commands[i] = ("rlineto", args)
continue
if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
assert (op[0] == "r") ^ (op[1] == "r")
if op[0] == "v":
pos = 0
elif op[0] != "r":
pos = 1
elif op[1] == "v":
pos = 4
else:
pos = 5
# Insert, while maintaining the type of args (can be tuple or list).
args = args[:pos] + type(args)((0,)) + args[pos:]
commands[i] = ("rrcurveto", args)
continue
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
for i in range(len(commands) - 1, 0, -1):
op1, args1 = commands[i - 1]
op2, args2 = commands[i]
new_op = None
# Merge logic...
if {op1, op2} <= {"rlineto", "rrcurveto"}:
if op1 == op2:
new_op = op1
else:
if op2 == "rrcurveto" and len(args2) == 6:
new_op = "rlinecurve"
elif len(args2) == 2:
new_op = "rcurveline"
elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
new_op = op2
elif {op1, op2} == {"vlineto", "hlineto"}:
new_op = op1
elif "curveto" == op1[2:] == op2[2:]:
d0, d1 = op1[:2]
d2, d3 = op2[:2]
if d1 == "r" or d2 == "r" or d0 == d3 == "r":
continue
d = _mergeCategories(d1, d2)
if d is None:
continue
if d0 == "r":
d = _mergeCategories(d, d3)
if d is None:
continue
new_op = "r" + d + "curveto"
elif d3 == "r":
d0 = _mergeCategories(d0, _negateCategory(d))
if d0 is None:
continue
new_op = d0 + "r" + "curveto"
else:
d0 = _mergeCategories(d0, d3)
if d0 is None:
continue
new_op = d0 + d + "curveto"
# Make sure the stack depth does not exceed (maxstack - 1), so
# that subroutinizer can insert subroutine calls at any point.
if new_op and len(args1) + len(args2) < maxstack:
commands[i - 1] = (new_op, args1 + args2)
del commands[i]
# 6. Resolve any remaining made-up operators into real operators.
for i in range(len(commands)):
op, args = commands[i]
if op in {"0moveto", "0lineto"}:
commands[i] = "h" + op[1:], args
continue
if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
op0, op1 = op[:2]
if (op0 == "r") ^ (op1 == "r"):
assert len(args) % 2 == 1
if op0 == "0":
op0 = "h"
if op1 == "0":
op1 = "h"
if op0 == "r":
op0 = op1
if op1 == "r":
op1 = _negateCategory(op0)
assert {op0, op1} <= {"h", "v"}, (op0, op1)
if len(args) % 2:
if op0 != op1: # vhcurveto / hvcurveto
if (op0 == "h") ^ (len(args) % 8 == 1):
# Swap last two args order
args = args[:-2] + args[-1:] + args[-2:-1]
else: # hhcurveto / vvcurveto
if op0 == "h": # hhcurveto
# Swap first two args order
args = args[1:2] + args[:1] + args[2:]
commands[i] = op0 + op1 + "curveto", args
continue
# 7. For any series of args which are blend lists, convert the series to a single blend arg.
for i in range(len(commands)):
op, args = commands[i]
if any(isinstance(arg, list) for arg in args):
commands[i] = op, _convertToBlendCmds(args)
return commands
def specializeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(
specializeCommands(programToCommands(program, getNumRegions), **kwargs)
)
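# Example usage (illustrative; the exact output depends on the optimizations
# above, so the result shown is only indicative):
#
#   >>> program = [0, 50, "rmoveto", 100, 0, "rlineto", 0, 100, "rlineto"]
#   >>> specializeProgram(program)  # doctest: +SKIP
#   # e.g. something like [50, 'vmoveto', 100, 100, 'hlineto']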
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.specialer",
description="CFF CharString generalizer/specializer",
)
parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
parser.add_argument(
"--num-regions",
metavar="NumRegions",
nargs="*",
default=None,
help="Number of variable-font regions for blend opertaions.",
)
options = parser.parse_args(sys.argv[1:])
getNumRegions = (
None
if options.num_regions is None
else lambda vsIndex: int(options.num_regions[0 if vsIndex is None else vsIndex])
)
program = stringToProgram(options.program)
print("Program:")
print(programToString(program))
commands = programToCommands(program, getNumRegions)
print("Commands:")
print(commands)
program2 = commandsToProgram(commands)
print("Program from commands:")
print(programToString(program2))
assert program == program2
print("Generalized program:")
print(programToString(generalizeProgram(program, getNumRegions)))
print("Specialized program:")
print(programToString(specializeProgram(program, getNumRegions)))
PK aZZZ �q� � fontTools/cffLib/width.py# -*- coding: utf-8 -*-
"""T2CharString glyph width optimizer.
CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
value do not need to specify their width in their charstring, saving bytes.
This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
values for a font, when provided with a list of glyph widths."""
from fontTools.ttLib import TTFont
from collections import defaultdict
from operator import add
from functools import reduce
class missingdict(dict):
def __init__(self, missing_func):
self.missing_func = missing_func
def __missing__(self, v):
return self.missing_func(v)
def cumSum(f, op=add, start=0, decreasing=False):
keys = sorted(f.keys())
minx, maxx = keys[0], keys[-1]
total = reduce(op, f.values(), start)
if decreasing:
missing = lambda x: start if x > maxx else total
domain = range(maxx, minx - 1, -1)
else:
missing = lambda x: start if x < minx else total
domain = range(minx, maxx + 1)
out = missingdict(missing)
v = start
for x in domain:
v = op(v, f[x])
out[x] = v
return out
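# Worked example (a sketch): cumulative frequency over a small width histogram.
#
#   >>> freqs = defaultdict(int, {500: 3, 502: 1})
#   >>> c = cumSum(freqs)  # increasing cumulative sum
#   >>> c[499], c[500], c[501], c[502], c[600]
#   (0, 3, 3, 4, 4)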
def byteCost(widths, default, nominal):
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
cost = 0
for w, freq in widths.items():
if w == default:
continue
diff = abs(w - nominal)
if diff <= 107:
cost += freq
elif diff <= 1131:
cost += freq * 2
else:
cost += freq * 5
return cost
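# The cost model above mirrors the Type2 charstring number encoding: widths
# within 107 of nominalWidthX fit in 1 byte, those within 1131 fit in 2 bytes,
# and anything larger is charged the worst-case 5 bytes here.
# Illustrative example (default=500, nominal=500):
#
#   >>> byteCost({500: 3, 600: 2, 2000: 1}, 500, 500)
#   7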
def optimizeWidthsBruteforce(widths):
"""Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
d = defaultdict(int)
for w in widths:
d[w] += 1
# Maximum number of bytes using default can possibly save
maxDefaultAdvantage = 5 * max(d.values())
minw, maxw = min(widths), max(widths)
domain = list(range(minw, maxw + 1))
bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
bestCost = len(widths) * 5 + 1
for nominal in domain:
if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
continue
for default in domain:
cost = byteCost(widths, default, nominal)
if cost < bestCost:
bestCost = cost
bestDefault = default
bestNominal = nominal
return bestDefault, bestNominal
def optimizeWidths(widths):
"""Given a list of glyph widths, or dictionary mapping glyph width to number of
glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
This algorithm is linear in UPEM+numGlyphs."""
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
keys = sorted(widths.keys())
minw, maxw = keys[0], keys[-1]
domain = list(range(minw, maxw + 1))
# Cumulative sum/max forward/backward.
cumFrqU = cumSum(widths, op=add)
cumMaxU = cumSum(widths, op=max)
cumFrqD = cumSum(widths, op=add, decreasing=True)
cumMaxD = cumSum(widths, op=max, decreasing=True)
# Cost per nominal choice, without default consideration.
nomnCostU = missingdict(
lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
)
nomnCostD = missingdict(
lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
)
nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
# Cost-saving per nominal choice, by best default choice.
dfltCostU = missingdict(
lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
)
dfltCostD = missingdict(
lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
)
dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
# Combined cost per nominal choice.
bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
# Best nominal.
nominal = min(domain, key=lambda x: bestCost[x])
# Work back the best default.
bestC = bestCost[nominal]
dfltC = nomnCost[nominal] - bestCost[nominal]
ends = []
if dfltC == dfltCostU[nominal]:
starts = [nominal, nominal - 108, nominal - 1132]
for start in starts:
while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
start -= 1
ends.append(start)
else:
starts = [nominal, nominal + 108, nominal + 1132]
for start in starts:
while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
start += 1
ends.append(start)
default = min(ends, key=lambda default: byteCost(widths, default, nominal))
return default, nominal
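# Example usage (illustrative; the returned pair is meant to be stored as the
# CFF Private dict's defaultWidthX and nominalWidthX respectively):
#
#   >>> default, nominal = optimizeWidths([500, 500, 500, 600, 600, 2000])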
def main(args=None):
"""Calculate optimum defaultWidthX/nominalWidthX values"""
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.width",
description=main.__doc__,
)
parser.add_argument(
"inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
)
parser.add_argument(
"-b",
"--brute-force",
dest="brute",
action="store_true",
help="Use brute-force approach (VERY slow)",
)
args = parser.parse_args(args)
for fontfile in args.inputs:
font = TTFont(fontfile)
hmtx = font["hmtx"]
widths = [m[0] for m in hmtx.metrics.values()]
if args.brute:
default, nominal = optimizeWidthsBruteforce(widths)
else:
default, nominal = optimizeWidths(widths)
print(
"glyphs=%d default=%d nominal=%d byteCost=%d"
% (len(widths), default, nominal, byteCost(widths, default, nominal))
)
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
main()
PK aZZZ fontTools/colorLib/__init__.pyPK aZZZ�"��Y �Y fontTools/colorLib/builder.py"""
colorLib.builder: Build COLR/CPAL tables from scratch
"""
import collections
import copy
import enum
from functools import partial
from math import ceil, log
from typing import (
Any,
Dict,
Generator,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from fontTools.misc.arrayTools import intRect
from fontTools.misc.fixedTools import fixedToFloat
from fontTools.misc.treeTools import build_n_ary_tree
from fontTools.ttLib.tables import C_O_L_R_
from fontTools.ttLib.tables import C_P_A_L_
from fontTools.ttLib.tables import _n_a_m_e
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode
from .errors import ColorLibError
from .geometry import round_start_circle_stable_containment
from .table_builder import BuildCallback, TableBuilder
# TODO move type aliases to colorLib.types?
T = TypeVar("T")
_Kwargs = Mapping[str, Any]
_PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]]
_PaintInputList = Sequence[_PaintInput]
_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]]
_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]]
_ClipBoxInput = Union[
Tuple[int, int, int, int, int], # format 2, variable
Tuple[int, int, int, int], # format 1, non-variable
ot.ClipBox,
]
MAX_PAINT_COLR_LAYER_COUNT = 255
_DEFAULT_ALPHA = 1.0
_MAX_REUSE_LEN = 32
def _beforeBuildPaintRadialGradient(paint, source):
x0 = source["x0"]
y0 = source["y0"]
r0 = source["r0"]
x1 = source["x1"]
y1 = source["y1"]
r1 = source["r1"]
# TODO apparently no builder_test confirms this works (?)
# avoid abrupt change after rounding when c0 is near c1's perimeter
c = round_start_circle_stable_containment((x0, y0), r0, (x1, y1), r1)
x0, y0 = c.centre
r0 = c.radius
# update source to ensure paint is built with corrected values
source["x0"] = x0
source["y0"] = y0
source["r0"] = r0
source["x1"] = x1
source["y1"] = y1
source["r1"] = r1
return paint, source
def _defaultColorStop():
colorStop = ot.ColorStop()
colorStop.Alpha = _DEFAULT_ALPHA
return colorStop
def _defaultVarColorStop():
colorStop = ot.VarColorStop()
colorStop.Alpha = _DEFAULT_ALPHA
return colorStop
def _defaultColorLine():
colorLine = ot.ColorLine()
colorLine.Extend = ExtendMode.PAD
return colorLine
def _defaultVarColorLine():
colorLine = ot.VarColorLine()
colorLine.Extend = ExtendMode.PAD
return colorLine
def _defaultPaintSolid():
paint = ot.Paint()
paint.Alpha = _DEFAULT_ALPHA
return paint
def _buildPaintCallbacks():
return {
(
BuildCallback.BEFORE_BUILD,
ot.Paint,
ot.PaintFormat.PaintRadialGradient,
): _beforeBuildPaintRadialGradient,
(
BuildCallback.BEFORE_BUILD,
ot.Paint,
ot.PaintFormat.PaintVarRadialGradient,
): _beforeBuildPaintRadialGradient,
(BuildCallback.CREATE_DEFAULT, ot.ColorStop): _defaultColorStop,
(BuildCallback.CREATE_DEFAULT, ot.VarColorStop): _defaultVarColorStop,
(BuildCallback.CREATE_DEFAULT, ot.ColorLine): _defaultColorLine,
(BuildCallback.CREATE_DEFAULT, ot.VarColorLine): _defaultVarColorLine,
(
BuildCallback.CREATE_DEFAULT,
ot.Paint,
ot.PaintFormat.PaintSolid,
): _defaultPaintSolid,
(
BuildCallback.CREATE_DEFAULT,
ot.Paint,
ot.PaintFormat.PaintVarSolid,
): _defaultPaintSolid,
}
def populateCOLRv0(
table: ot.COLR,
colorGlyphsV0: _ColorGlyphsV0Dict,
glyphMap: Optional[Mapping[str, int]] = None,
):
"""Build v0 color layers and add to existing COLR table.
Args:
table: a raw ``otTables.COLR()`` object (not ttLib's ``table_C_O_L_R_``).
colorGlyphsV0: map of base glyph names to lists of (layer glyph names,
color palette index) tuples. Can be empty.
glyphMap: a map from glyph names to glyph indices, as returned from
``TTFont.getReverseGlyphMap()``, to optionally sort base records by GID.
"""
if glyphMap is not None:
colorGlyphItems = sorted(
colorGlyphsV0.items(), key=lambda item: glyphMap[item[0]]
)
else:
colorGlyphItems = colorGlyphsV0.items()
baseGlyphRecords = []
layerRecords = []
for baseGlyph, layers in colorGlyphItems:
baseRec = ot.BaseGlyphRecord()
baseRec.BaseGlyph = baseGlyph
baseRec.FirstLayerIndex = len(layerRecords)
baseRec.NumLayers = len(layers)
baseGlyphRecords.append(baseRec)
for layerGlyph, paletteIndex in layers:
layerRec = ot.LayerRecord()
layerRec.LayerGlyph = layerGlyph
layerRec.PaletteIndex = paletteIndex
layerRecords.append(layerRec)
table.BaseGlyphRecordArray = table.LayerRecordArray = None
if baseGlyphRecords:
table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray()
table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords
if layerRecords:
table.LayerRecordArray = ot.LayerRecordArray()
table.LayerRecordArray.LayerRecord = layerRecords
table.BaseGlyphRecordCount = len(baseGlyphRecords)
table.LayerRecordCount = len(layerRecords)
def buildCOLR(
colorGlyphs: _ColorGlyphsDict,
version: Optional[int] = None,
*,
glyphMap: Optional[Mapping[str, int]] = None,
varStore: Optional[ot.VarStore] = None,
varIndexMap: Optional[ot.DeltaSetIndexMap] = None,
clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None,
allowLayerReuse: bool = True,
) -> C_O_L_R_.table_C_O_L_R_:
"""Build COLR table from color layers mapping.
Args:
colorGlyphs: map of base glyph name to, either list of (layer glyph name,
color palette index) tuples for COLRv0; or a single ``Paint`` (dict) or
list of ``Paint`` for COLRv1.
version: the version of COLR table. If None, the version is determined
by the presence of COLRv1 paints or variation data (varStore), which
require version 1; otherwise, if all base glyphs use only simple color
layers, version 0 is used.
glyphMap: a map from glyph names to glyph indices, as returned from
TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
varStore: Optional ItemVariationStore for deltas associated with v1 layers.
varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layers.
clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples:
(xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase).
Returns:
A new COLR table.
"""
self = C_O_L_R_.table_C_O_L_R_()
if varStore is not None and version == 0:
raise ValueError("Can't add VarStore to COLRv0")
if version in (None, 0) and not varStore:
# split color glyphs into v0 and v1 and encode separately
colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs)
if version == 0 and colorGlyphsV1:
raise ValueError("Can't encode COLRv1 glyphs in COLRv0")
else:
# unless v1 was explicitly requested or there are variations, in which case
# we encode all color glyphs as v1
colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs
colr = ot.COLR()
populateCOLRv0(colr, colorGlyphsV0, glyphMap)
colr.LayerList, colr.BaseGlyphList = buildColrV1(
colorGlyphsV1,
glyphMap,
allowLayerReuse=allowLayerReuse,
)
if version is None:
version = 1 if (varStore or colorGlyphsV1) else 0
elif version not in (0, 1):
raise NotImplementedError(version)
self.version = colr.Version = version
if version == 0:
self.ColorLayers = self._decompileColorLayersV0(colr)
else:
colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None
colr.VarIndexMap = varIndexMap
colr.VarStore = varStore
self.table = colr
return self
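# Example usage (a minimal sketch; the glyph names are hypothetical and must
# exist in the font this table is added to):
#
#   >>> colr = buildCOLR({"A": [("A.color0", 0), ("A.color1", 1)]})
#   >>> colr.version  # plain layer/palette-index tuples yield a v0 table
#   0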
def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList:
clipList = ot.ClipList()
clipList.Format = 1
clipList.clips = {name: buildClipBox(box) for name, box in clipBoxes.items()}
return clipList
def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox:
if isinstance(clipBox, ot.ClipBox):
return clipBox
n = len(clipBox)
clip = ot.ClipBox()
if n not in (4, 5):
raise ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {n}")
clip.xMin, clip.yMin, clip.xMax, clip.yMax = intRect(clipBox[:4])
clip.Format = int(n == 5) + 1
if n == 5:
clip.VarIndexBase = int(clipBox[4])
return clip
class ColorPaletteType(enum.IntFlag):
USABLE_WITH_LIGHT_BACKGROUND = 0x0001
USABLE_WITH_DARK_BACKGROUND = 0x0002
@classmethod
def _missing_(cls, value):
# enforce reserved bits
if isinstance(value, int) and (value < 0 or value & 0xFFFC != 0):
raise ValueError(f"{value} is not a valid {cls.__name__}")
return super()._missing_(value)
# None, 'abc' or {'en': 'abc', 'de': 'xyz'}
_OptionalLocalizedString = Union[None, str, Dict[str, str]]
def buildPaletteLabels(
labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e
) -> List[Optional[int]]:
return [
(
nameTable.addMultilingualName(l, mac=False)
if isinstance(l, dict)
else (
C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
if l is None
else nameTable.addMultilingualName({"en": l}, mac=False)
)
)
for l in labels
]
def buildCPAL(
palettes: Sequence[Sequence[Tuple[float, float, float, float]]],
paletteTypes: Optional[Sequence[ColorPaletteType]] = None,
paletteLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None,
) -> C_P_A_L_.table_C_P_A_L_:
"""Build CPAL table from list of color palettes.
Args:
palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats
in the range [0..1].
paletteTypes: optional list of ColorPaletteType, one for each palette.
paletteLabels: optional list of palette labels. Each label can be either:
None (no label), a string (for default English labels), or a
localized string (as a dict keyed with BCP47 language codes).
paletteEntryLabels: optional list of palette entry labels, one for each
palette entry (see paletteLabels).
nameTable: optional name table where to store palette and palette entry
labels. Required if either paletteLabels or paletteEntryLabels is set.
Return:
A new CPAL table: version 1 if custom palette types or labels are specified,
otherwise version 0.
"""
if len({len(p) for p in palettes}) != 1:
raise ColorLibError("color palettes have different lengths")
if (paletteLabels or paletteEntryLabels) and not nameTable:
raise TypeError(
"nameTable is required if palette or palette entries have labels"
)
cpal = C_P_A_L_.table_C_P_A_L_()
cpal.numPaletteEntries = len(palettes[0])
cpal.palettes = []
for i, palette in enumerate(palettes):
colors = []
for j, color in enumerate(palette):
if not isinstance(color, tuple) or len(color) != 4:
raise ColorLibError(
f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}"
)
if any(v > 1 or v < 0 for v in color):
raise ColorLibError(
f"palette[{i}][{j}] has invalid out-of-range [0..1] color: {color!r}"
)
# input colors are RGBA, CPAL encodes them as BGRA
red, green, blue, alpha = color
colors.append(
C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha)))
)
cpal.palettes.append(colors)
if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)):
cpal.version = 1
if paletteTypes is not None:
if len(paletteTypes) != len(palettes):
raise ColorLibError(
f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}"
)
cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes]
else:
cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len(
palettes
)
if paletteLabels is not None:
if len(paletteLabels) != len(palettes):
raise ColorLibError(
f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}"
)
cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable)
else:
cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes)
if paletteEntryLabels is not None:
if len(paletteEntryLabels) != cpal.numPaletteEntries:
raise ColorLibError(
f"Expected {cpal.numPaletteEntries} paletteEntryLabels, "
f"got {len(paletteEntryLabels)}"
)
cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable)
else:
cpal.paletteEntryLabels = [
C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
] * cpal.numPaletteEntries
else:
cpal.version = 0
return cpal
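# Example usage (a minimal sketch): two palettes of two RGBA colors each,
# producing a version 0 CPAL since no palette types or labels are given.
#
#   >>> cpal = buildCPAL(
#   ...     [
#   ...         [(1.0, 0.0, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0)],  # red, blue
#   ...         [(0.5, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)],  # grey, white
#   ...     ]
#   ... )
#   >>> cpal.version, cpal.numPaletteEntries
#   (0, 2)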
# COLR v1 tables
# See draft proposal at: https://github.com/googlefonts/colr-gradients-spec
def _is_colrv0_layer(layer: Any) -> bool:
# Consider as COLRv0 layer any sequence of length 2 (be it tuple or list) in which
# the first element is a str (the layerGlyph) and the second element is an int
# (CPAL paletteIndex).
# https://github.com/googlefonts/ufo2ft/issues/426
try:
layerGlyph, paletteIndex = layer
except (TypeError, ValueError):
return False
else:
return isinstance(layerGlyph, str) and isinstance(paletteIndex, int)
def _split_color_glyphs_by_version(
colorGlyphs: _ColorGlyphsDict,
) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]:
colorGlyphsV0 = {}
colorGlyphsV1 = {}
for baseGlyph, layers in colorGlyphs.items():
if all(_is_colrv0_layer(l) for l in layers):
colorGlyphsV0[baseGlyph] = layers
else:
colorGlyphsV1[baseGlyph] = layers
# sanity check
assert set(colorGlyphs) == (set(colorGlyphsV0) | set(colorGlyphsV1))
return colorGlyphsV0, colorGlyphsV1
def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]:
# TODO feels like something itertools might have already
for lbound in range(num_layers):
# Reuse of very large #s of layers is relatively unlikely
# +2: we want sequences of at least 2
# otData handles single-record duplication
for ubound in range(
lbound + 2, min(num_layers + 1, lbound + 2 + _MAX_REUSE_LEN)
):
yield (lbound, ubound)
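# For example, _reuse_ranges(4) yields the candidate (start, end) slices
# (0, 2), (0, 3), (0, 4), (1, 3), (1, 4), (2, 4): every sub-sequence of at
# least two layers, capped at _MAX_REUSE_LEN entries.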
class LayerReuseCache:
reusePool: Mapping[Tuple[Any, ...], int]
tuples: Mapping[int, Tuple[Any, ...]]
keepAlive: List[ot.Paint] # we need id to remain valid
def __init__(self):
self.reusePool = {}
self.tuples = {}
self.keepAlive = []
def _paint_tuple(self, paint: ot.Paint):
# start simple, who even cares about cyclic graphs or interesting field types
def _tuple_safe(value):
if isinstance(value, enum.Enum):
return value
elif hasattr(value, "__dict__"):
return tuple(
(k, _tuple_safe(v)) for k, v in sorted(value.__dict__.items())
)
elif isinstance(value, collections.abc.MutableSequence):
return tuple(_tuple_safe(e) for e in value)
return value
# Cache the tuples for individual Paint instead of the whole sequence
# because the seq could be a transient slice
result = self.tuples.get(id(paint), None)
if result is None:
result = _tuple_safe(paint)
self.tuples[id(paint)] = result
self.keepAlive.append(paint)
return result
def _as_tuple(self, paints: Sequence[ot.Paint]) -> Tuple[Any, ...]:
return tuple(self._paint_tuple(p) for p in paints)
def try_reuse(self, layers: List[ot.Paint]) -> List[ot.Paint]:
found_reuse = True
while found_reuse:
found_reuse = False
ranges = sorted(
_reuse_ranges(len(layers)),
key=lambda t: (t[1] - t[0], t[1], t[0]),
reverse=True,
)
for lbound, ubound in ranges:
reuse_lbound = self.reusePool.get(
self._as_tuple(layers[lbound:ubound]), -1
)
if reuse_lbound == -1:
continue
new_slice = ot.Paint()
new_slice.Format = int(ot.PaintFormat.PaintColrLayers)
new_slice.NumLayers = ubound - lbound
new_slice.FirstLayerIndex = reuse_lbound
layers = layers[:lbound] + [new_slice] + layers[ubound:]
found_reuse = True
break
return layers
def add(self, layers: List[ot.Paint], first_layer_index: int):
for lbound, ubound in _reuse_ranges(len(layers)):
self.reusePool[self._as_tuple(layers[lbound:ubound])] = (
lbound + first_layer_index
)
class LayerListBuilder:
layers: List[ot.Paint]
cache: LayerReuseCache
allowLayerReuse: bool
def __init__(self, *, allowLayerReuse=True):
self.layers = []
if allowLayerReuse:
self.cache = LayerReuseCache()
else:
self.cache = None
# We need to intercept construction of PaintColrLayers
callbacks = _buildPaintCallbacks()
callbacks[
(
BuildCallback.BEFORE_BUILD,
ot.Paint,
ot.PaintFormat.PaintColrLayers,
)
] = self._beforeBuildPaintColrLayers
self.tableBuilder = TableBuilder(callbacks)
# COLR layers is unusual in that it modifies shared state
# so we need a callback into an object
def _beforeBuildPaintColrLayers(self, dest, source):
# Sketchy gymnastics: a sequence input will have dropped its layers
# into NumLayers; get it back
if isinstance(source.get("NumLayers", None), collections.abc.Sequence):
layers = source["NumLayers"]
else:
layers = source["Layers"]
# Convert maps seqs or whatever into typed objects
layers = [self.buildPaint(l) for l in layers]
# No reason to have a colr layers with just one entry
if len(layers) == 1:
return layers[0], {}
if self.cache is not None:
# Look for reuse, with preference to longer sequences
# This may make the layer list smaller
layers = self.cache.try_reuse(layers)
# The layer list is now final; if it's too big we need to tree it
is_tree = len(layers) > MAX_PAINT_COLR_LAYER_COUNT
layers = build_n_ary_tree(layers, n=MAX_PAINT_COLR_LAYER_COUNT)
# We now have a tree of sequences with Paint leaves.
# Convert the sequences into PaintColrLayers.
def listToColrLayers(layer):
if isinstance(layer, collections.abc.Sequence):
return self.buildPaint(
{
"Format": ot.PaintFormat.PaintColrLayers,
"Layers": [listToColrLayers(l) for l in layer],
}
)
return layer
layers = [listToColrLayers(l) for l in layers]
# No reason to have a colr layers with just one entry
if len(layers) == 1:
return layers[0], {}
paint = ot.Paint()
paint.Format = int(ot.PaintFormat.PaintColrLayers)
paint.NumLayers = len(layers)
paint.FirstLayerIndex = len(self.layers)
self.layers.extend(layers)
# Register our parts for reuse provided we aren't a tree
# If we are a tree the leaves registered for reuse and that will suffice
if self.cache is not None and not is_tree:
self.cache.add(layers, paint.FirstLayerIndex)
# we've fully built dest; empty source prevents generalized build from kicking in
return paint, {}
def buildPaint(self, paint: _PaintInput) -> ot.Paint:
return self.tableBuilder.build(ot.Paint, paint)
def build(self) -> Optional[ot.LayerList]:
if not self.layers:
return None
layers = ot.LayerList()
layers.LayerCount = len(self.layers)
layers.Paint = self.layers
return layers
def buildBaseGlyphPaintRecord(
baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput
) -> ot.BaseGlyphList:
self = ot.BaseGlyphPaintRecord()
self.BaseGlyph = baseGlyph
self.Paint = layerBuilder.buildPaint(paint)
return self
def _format_glyph_errors(errors: Mapping[str, Exception]) -> str:
lines = []
for baseGlyph, error in sorted(errors.items()):
lines.append(f" {baseGlyph} => {type(error).__name__}: {error}")
return "\n".join(lines)
def buildColrV1(
colorGlyphs: _ColorGlyphsDict,
glyphMap: Optional[Mapping[str, int]] = None,
*,
allowLayerReuse: bool = True,
) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]:
if glyphMap is not None:
colorGlyphItems = sorted(
colorGlyphs.items(), key=lambda item: glyphMap[item[0]]
)
else:
colorGlyphItems = colorGlyphs.items()
errors = {}
baseGlyphs = []
layerBuilder = LayerListBuilder(allowLayerReuse=allowLayerReuse)
for baseGlyph, paint in colorGlyphItems:
try:
baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint))
except (ColorLibError, OverflowError, ValueError, TypeError) as e:
errors[baseGlyph] = e
if errors:
failed_glyphs = _format_glyph_errors(errors)
exc = ColorLibError(f"Failed to build BaseGlyphList:\n{failed_glyphs}")
exc.errors = errors
raise exc from next(iter(errors.values()))
layers = layerBuilder.build()
glyphs = ot.BaseGlyphList()
glyphs.BaseGlyphCount = len(baseGlyphs)
glyphs.BaseGlyphPaintRecord = baseGlyphs
return (layers, glyphs)
PK aZZZ]��) ) fontTools/colorLib/errors.pyclass ColorLibError(Exception):
pass
PK aZZZ+t�� � fontTools/colorLib/geometry.py"""Helpers for manipulating 2D points and vectors in COLR table."""
from math import copysign, cos, hypot, isclose, pi
from fontTools.misc.roundTools import otRound
def _vector_between(origin, target):
return (target[0] - origin[0], target[1] - origin[1])
def _round_point(pt):
return (otRound(pt[0]), otRound(pt[1]))
def _unit_vector(vec):
length = hypot(*vec)
if length == 0:
return None
return (vec[0] / length, vec[1] / length)
_CIRCLE_INSIDE_TOLERANCE = 1e-4
# The unit vector's X and Y components are respectively
# U = (cos(α), sin(α))
# where α is the angle between the unit vector and the positive x axis.
_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi) # == sin(1/8 * pi) == 0.38268343236508984
def _rounding_offset(direction):
# Return 2-tuple of -/+ 1.0 or 0.0 approximately based on the direction vector.
# We divide the unit circle in 8 equal slices oriented towards the cardinal
# (N, E, S, W) and intermediate (NE, SE, SW, NW) directions. To each slice we
# map one of the possible cases: -1, 0, +1 for either X and Y coordinate.
# E.g. Return (+1.0, -1.0) if unit vector is oriented towards SE, or
# (-1.0, 0.0) if it's pointing West, etc.
uv = _unit_vector(direction)
if not uv:
return (0, 0)
result = []
for uv_component in uv:
if -_UNIT_VECTOR_THRESHOLD <= uv_component < _UNIT_VECTOR_THRESHOLD:
# unit vector component near 0: direction almost orthogonal to the
# direction of the current axis, thus keep coordinate unchanged
result.append(0)
else:
# nudge coord by +/- 1.0 in direction of unit vector
result.append(copysign(1.0, uv_component))
return tuple(result)
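# For example, a direction vector pointing south-east such as (10, -10) maps
# to (1.0, -1.0), while one pointing almost due west such as (-10, 1) maps
# to (-1.0, 0).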
class Circle:
def __init__(self, centre, radius):
self.centre = centre
self.radius = radius
def __repr__(self):
return f"Circle(centre={self.centre}, radius={self.radius})"
def round(self):
return Circle(_round_point(self.centre), otRound(self.radius))
def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE):
dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre))
return (
isclose(outer_circle.radius, dist, rel_tol=tolerance)
or outer_circle.radius > dist
)
def concentric(self, other):
return self.centre == other.centre
def move(self, dx, dy):
self.centre = (self.centre[0] + dx, self.centre[1] + dy)
def round_start_circle_stable_containment(c0, r0, c1, r1):
"""Round start circle so that it stays inside/outside end circle after rounding.
The rounding of circle coordinates to integers may cause an abrupt change
if the start circle c0 is so close to the end circle c1's perimeter that
it ends up falling outside (or inside) as a result of the rounding.
To keep the gradient unchanged, we nudge it in the right direction.
See:
https://github.com/googlefonts/colr-gradients-spec/issues/204
https://github.com/googlefonts/picosvg/issues/158
"""
start, end = Circle(c0, r0), Circle(c1, r1)
inside_before_round = start.inside(end)
round_start = start.round()
round_end = end.round()
inside_after_round = round_start.inside(round_end)
if inside_before_round == inside_after_round:
return round_start
elif inside_after_round:
# start was outside before rounding: we need to push start away from end
direction = _vector_between(round_end.centre, round_start.centre)
radius_delta = +1.0
else:
# start was inside before rounding: we need to push start towards end
direction = _vector_between(round_start.centre, round_end.centre)
radius_delta = -1.0
dx, dy = _rounding_offset(direction)
# At most 2 iterations ought to be enough to converge. Before the loop, we
# know the start circle didn't keep containment after normal rounding; thus
# we continue adjusting by -/+ 1.0 until containment is restored.
# Normal rounding can at most move each coordinate by -/+0.5; in the worst case
# both the start and end circle's centres and radii will be rounded in opposite
# directions, e.g. when they move along a 45 degree diagonal:
# c0 = (1.5, 1.5) ===> (2.0, 2.0)
# r0 = 0.5 ===> 1.0
# c1 = (0.499, 0.499) ===> (0.0, 0.0)
# r1 = 2.499 ===> 2.0
# In this example, the relative distance between the circles, calculated
# as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and
# -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both
# x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these
# moves cover twice that distance, which is enough to restore containment.
max_attempts = 2
for _ in range(max_attempts):
if round_start.concentric(round_end):
# can't move c0 towards c1 (they are the same), so we change the radius
round_start.radius += radius_delta
assert round_start.radius >= 0
else:
round_start.move(dx, dy)
if inside_before_round == round_start.inside(round_end):
break
else: # likely a bug
raise AssertionError(
f"Rounding circle {start} "
f"{'inside' if inside_before_round else 'outside'} "
f"{end} failed after {max_attempts} attempts!"
)
return round_start
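# Example usage (using the worked numbers from the comment above; the result
# shown is roughly what to expect):
#
#   >>> round_start_circle_stable_containment((1.5, 1.5), 0.5, (0.499, 0.499), 2.499)
#   Circle(centre=(0.0, 0.0), radius=1)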
PK aZZZ}`/�- - # fontTools/colorLib/table_builder.py"""
colorLib.table_builder: Generic helper for filling in BaseTable derivatives from tuples and maps and such.
"""
import collections
import enum
from fontTools.ttLib.tables.otBase import (
BaseTable,
FormatSwitchingBaseTable,
UInt8FormatSwitchingBaseTable,
)
from fontTools.ttLib.tables.otConverters import (
ComputedInt,
SimpleValue,
Struct,
Short,
UInt8,
UShort,
IntValue,
FloatValue,
OptionalValue,
)
from fontTools.misc.roundTools import otRound
class BuildCallback(enum.Enum):
"""Keyed on (BEFORE_BUILD, class[, Format if available]).
Receives (dest, source).
Should return (dest, source), which can be new objects.
"""
BEFORE_BUILD = enum.auto()
"""Keyed on (AFTER_BUILD, class[, Format if available]).
Receives (dest).
Should return dest, which can be a new object.
"""
AFTER_BUILD = enum.auto()
"""Keyed on (CREATE_DEFAULT, class[, Format if available]).
Receives no arguments.
Should return a new instance of class.
"""
CREATE_DEFAULT = enum.auto()
def _assignable(convertersByName):
return {k: v for k, v in convertersByName.items() if not isinstance(v, ComputedInt)}
def _isNonStrSequence(value):
return isinstance(value, collections.abc.Sequence) and not isinstance(value, str)
def _split_format(cls, source):
if _isNonStrSequence(source):
assert len(source) > 0, f"{cls} needs at least format from {source}"
fmt, remainder = source[0], source[1:]
elif isinstance(source, collections.abc.Mapping):
assert "Format" in source, f"{cls} needs at least Format from {source}"
remainder = source.copy()
fmt = remainder.pop("Format")
else:
raise ValueError(f"Not sure how to populate {cls} from {source}")
assert isinstance(
fmt, collections.abc.Hashable
), f"{cls} Format is not hashable: {fmt!r}"
assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"
return fmt, remainder
class TableBuilder:
"""
Helps to populate things derived from BaseTable from maps, tuples, etc.
A table of lifecycle callbacks may be provided to add logic beyond what is possible
based on otData info for the target class. See BuildCallback.
"""
def __init__(self, callbackTable=None):
if callbackTable is None:
callbackTable = {}
self._callbackTable = callbackTable
def _convert(self, dest, field, converter, value):
enumClass = getattr(converter, "enumClass", None)
if enumClass:
if isinstance(value, enumClass):
pass
elif isinstance(value, str):
try:
value = getattr(enumClass, value.upper())
except AttributeError:
raise ValueError(f"{value} is not a valid {enumClass}")
else:
value = enumClass(value)
elif isinstance(converter, IntValue):
value = otRound(value)
elif isinstance(converter, FloatValue):
value = float(value)
elif isinstance(converter, Struct):
if converter.repeat:
if _isNonStrSequence(value):
value = [self.build(converter.tableClass, v) for v in value]
else:
value = [self.build(converter.tableClass, value)]
setattr(dest, converter.repeat, len(value))
else:
value = self.build(converter.tableClass, value)
elif callable(converter):
value = converter(value)
setattr(dest, field, value)
def build(self, cls, source):
assert issubclass(cls, BaseTable)
if isinstance(source, cls):
return source
callbackKey = (cls,)
fmt = None
if issubclass(cls, FormatSwitchingBaseTable):
fmt, source = _split_format(cls, source)
callbackKey = (cls, fmt)
dest = self._callbackTable.get(
(BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls()
)()
assert isinstance(dest, cls)
convByName = _assignable(cls.convertersByName)
skippedFields = set()
# For format switchers we need to resolve converters based on format
if issubclass(cls, FormatSwitchingBaseTable):
dest.Format = fmt
convByName = _assignable(convByName[dest.Format])
skippedFields.add("Format")
# Convert sequence => mapping so before thunk only has to handle one format
if _isNonStrSequence(source):
# Sequence (typically list or tuple) assumed to match fields in declaration order
assert len(source) <= len(
convByName
), f"Sequence of {len(source)} too long for {cls}; expected <= {len(convByName)} values"
source = dict(zip(convByName.keys(), source))
dest, source = self._callbackTable.get(
(BuildCallback.BEFORE_BUILD,) + callbackKey, lambda d, s: (d, s)
)(dest, source)
if isinstance(source, collections.abc.Mapping):
for field, value in source.items():
if field in skippedFields:
continue
converter = convByName.get(field, None)
if not converter:
raise ValueError(
f"Unrecognized field {field} for {cls}; expected one of {sorted(convByName.keys())}"
)
self._convert(dest, field, converter, value)
else:
# let's try as a 1-tuple
dest = self.build(cls, (source,))
for field, conv in convByName.items():
if not hasattr(dest, field) and isinstance(conv, OptionalValue):
setattr(dest, field, conv.DEFAULT)
dest = self._callbackTable.get(
(BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d
)(dest)
return dest
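# Example usage (a minimal sketch, assuming ``from fontTools.ttLib.tables import
# otTables as ot``; the Paint dict follows the Format-keyed mapping convention
# handled by _split_format above):
#
#   >>> builder = TableBuilder()
#   >>> paint = builder.build(
#   ...     ot.Paint,
#   ...     {"Format": ot.PaintFormat.PaintSolid, "PaletteIndex": 0, "Alpha": 1.0},
#   ... )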
class TableUnbuilder:
def __init__(self, callbackTable=None):
if callbackTable is None:
callbackTable = {}
self._callbackTable = callbackTable
def unbuild(self, table):
assert isinstance(table, BaseTable)
source = {}
callbackKey = (type(table),)
if isinstance(table, FormatSwitchingBaseTable):
source["Format"] = int(table.Format)
callbackKey += (table.Format,)
for converter in table.getConverters():
if isinstance(converter, ComputedInt):
continue
value = getattr(table, converter.name)
enumClass = getattr(converter, "enumClass", None)
if enumClass:
source[converter.name] = value.name.lower()
elif isinstance(converter, Struct):
if converter.repeat:
source[converter.name] = [self.unbuild(v) for v in value]
else:
source[converter.name] = self.unbuild(value)
elif isinstance(converter, SimpleValue):
# "simple" values (e.g. int, float, str) need no further un-building
source[converter.name] = value
else:
raise NotImplementedError(
"Don't know how unbuild {value!r} with {converter!r}"
)
source = self._callbackTable.get(callbackKey, lambda s: s)(source)
return source
PK aZZZV��^ ^ fontTools/colorLib/unbuilder.pyfrom fontTools.ttLib.tables import otTables as ot
from .table_builder import TableUnbuilder
def unbuildColrV1(layerList, baseGlyphList):
layers = []
if layerList:
layers = layerList.Paint
unbuilder = LayerListUnbuilder(layers)
return {
rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
for rec in baseGlyphList.BaseGlyphPaintRecord
}
def _flatten_layers(lst):
for paint in lst:
if paint["Format"] == ot.PaintFormat.PaintColrLayers:
yield from _flatten_layers(paint["Layers"])
else:
yield paint
class LayerListUnbuilder:
def __init__(self, layers):
self.layers = layers
callbacks = {
(
ot.Paint,
ot.PaintFormat.PaintColrLayers,
): self._unbuildPaintColrLayers,
}
self.tableUnbuilder = TableUnbuilder(callbacks)
def unbuildPaint(self, paint):
assert isinstance(paint, ot.Paint)
return self.tableUnbuilder.unbuild(paint)
def _unbuildPaintColrLayers(self, source):
assert source["Format"] == ot.PaintFormat.PaintColrLayers
layers = list(
_flatten_layers(
[
self.unbuildPaint(childPaint)
for childPaint in self.layers[
source["FirstLayerIndex"] : source["FirstLayerIndex"]
+ source["NumLayers"]
]
]
)
)
if len(layers) == 1:
return layers[0]
return {"Format": source["Format"], "Layers": layers}
if __name__ == "__main__":
from pprint import pprint
import sys
from fontTools.ttLib import TTFont
try:
fontfile = sys.argv[1]
except IndexError:
sys.exit("usage: fonttools colorLib.unbuilder FONTFILE")
font = TTFont(fontfile)
colr = font["COLR"]
if colr.version < 1:
sys.exit(f"error: No COLR table version=1 found in {fontfile}")
colorGlyphs = unbuildColrV1(
colr.table.LayerList,
colr.table.BaseGlyphList,
)
pprint(colorGlyphs)
PK aZZZS�6�S S fontTools/config/__init__.py"""
Define all configuration options that can affect the working of fontTools
modules. E.g. optimization levels of varLib IUP, otlLib GPOS compression level,
etc. If this file gets too big, split it into smaller files per-module.
An instance of the Config class can be attached to a TTFont object, so that
the various modules can access their configuration options from it.
"""
from textwrap import dedent
from fontTools.misc.configTools import *
class Config(AbstractConfig):
options = Options()
OPTIONS = Config.options
Config.register_option(
name="fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
help=dedent(
"""\
GPOS Lookup type 2 (PairPos) compression level:
0 = do not attempt to compact PairPos lookups;
1 to 8 = create at most 1 to 8 new subtables for each existing
subtable, provided that it would yield a 50%% file size saving;
9 = create as many new subtables as needed to yield a file size saving.
Default: 0.
This compaction aims to save file size, by splitting large class
kerning subtables (Format 2) that contain many zero values into
smaller and denser subtables. It's a trade-off between the overhead
of several subtables versus the sparseness of one big subtable.
See the pull request: https://github.com/fonttools/fonttools/pull/2326
"""
),
default=0,
parse=int,
validate=lambda v: v in range(10),
)
Config.register_option(
name="fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER",
help=dedent(
"""\
FontTools tries to use the HarfBuzz Repacker to serialize GPOS/GSUB tables
if the uharfbuzz python bindings are importable, otherwise falls back to its
slower, less efficient serializer. Set to False to always use the latter.
Set to True to explicitly request the HarfBuzz Repacker (will raise an
error if uharfbuzz cannot be imported).
"""
),
default=None,
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
Config.register_option(
name="fontTools.otlLib.builder:WRITE_GPOS7",
help=dedent(
"""\
macOS before 13.2 didn’t support GPOS LookupType 7 (non-chaining
ContextPos lookups), so FontTools.otlLib.builder disables a file size
optimisation that would use LookupType 7 instead of 8 when there is no
chaining (no prefix or suffix). Set to True to enable the optimization.
"""
),
default=False,
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
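# Example usage (a rough sketch, assuming the dict-like access provided by
# fontTools.misc.configTools.AbstractConfig; the font path is hypothetical):
#
#   >>> from fontTools.ttLib import TTFont
#   >>> font = TTFont("MyFont.otf")
#   >>> font.cfg["fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL"] = 9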
PK aZZZ���7j j fontTools/cu2qu/__init__.py# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cu2qu import *
PK aZZZ�|'KS S fontTools/cu2qu/__main__.pyimport sys
from .cli import main
if __name__ == "__main__":
sys.exit(main())
PK aZZZ��d�E E fontTools/cu2qu/benchmark.py"""Benchmark the cu2qu algorithm performance."""
from .cu2qu import *
import random
import timeit
MAX_ERR = 0.05
def generate_curve():
return [
tuple(float(random.randint(0, 2048)) for coord in range(2))
for point in range(4)
]
def setup_curve_to_quadratic():
return generate_curve(), MAX_ERR
def setup_curves_to_quadratic():
num_curves = 3
return ([generate_curve() for curve in range(num_curves)], [MAX_ERR] * num_curves)
def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000):
setup_func = "setup_" + function
if setup_suffix:
print("%s with %s:" % (function, setup_suffix), end="")
setup_func += "_" + setup_suffix
else:
print("%s:" % function, end="")
def wrapper(function, setup_func):
function = globals()[function]
setup_func = globals()[setup_func]
def wrapped():
return function(*setup_func())
return wrapped
results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
print("\t%5.1fus" % (min(results) * 1000000.0 / number))
def main():
"""Benchmark the cu2qu algorithm performance."""
run_benchmark("cu2qu", "curve_to_quadratic")
run_benchmark("cu2qu", "curves_to_quadratic")
if __name__ == "__main__":
random.seed(1)
main()
PK aZZZ!�7Ի � fontTools/cu2qu/cli.pyimport os
import argparse
import logging
import shutil
import multiprocessing as mp
from contextlib import closing
from functools import partial
import fontTools
from .ufo import font_to_quadratic, fonts_to_quadratic
ufo_module = None
try:
import ufoLib2 as ufo_module
except ImportError:
try:
import defcon as ufo_module
except ImportError as e:
pass
logger = logging.getLogger("fontTools.cu2qu")
def _cpu_count():
try:
return mp.cpu_count()
except NotImplementedError: # pragma: no cover
return 1
def open_ufo(path):
if hasattr(ufo_module.Font, "open"): # ufoLib2
return ufo_module.Font.open(path)
return ufo_module.Font(path) # defcon
def _font_to_quadratic(input_path, output_path=None, **kwargs):
ufo = open_ufo(input_path)
logger.info("Converting curves for %s", input_path)
if font_to_quadratic(ufo, **kwargs):
logger.info("Saving %s", output_path)
if output_path:
ufo.save(output_path)
else:
ufo.save() # save in-place
elif output_path:
_copytree(input_path, output_path)
def _samepath(path1, path2):
# TODO on python3+, there's os.path.samefile
path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1)))
path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
return path1 == path2
def _copytree(input_path, output_path):
if _samepath(input_path, output_path):
logger.debug("input and output paths are the same file; skipped copy")
return
if os.path.exists(output_path):
shutil.rmtree(output_path)
shutil.copytree(input_path, output_path)
def main(args=None):
"""Convert a UFO font from cubic to quadratic curves"""
parser = argparse.ArgumentParser(prog="cu2qu")
parser.add_argument("--version", action="version", version=fontTools.__version__)
parser.add_argument(
"infiles",
nargs="+",
metavar="INPUT",
help="one or more input UFO source file(s).",
)
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument(
"-e",
"--conversion-error",
type=float,
metavar="ERROR",
default=None,
help="maxiumum approximation error measured in EM (default: 0.001)",
)
parser.add_argument(
"-m",
"--mixed",
default=False,
action="store_true",
help="whether to used mixed quadratic and cubic curves",
)
parser.add_argument(
"--keep-direction",
dest="reverse_direction",
action="store_false",
help="do not reverse the contour direction",
)
mode_parser = parser.add_mutually_exclusive_group()
mode_parser.add_argument(
"-i",
"--interpolatable",
action="store_true",
help="whether curve conversion should keep interpolation compatibility",
)
mode_parser.add_argument(
"-j",
"--jobs",
type=int,
nargs="?",
default=1,
const=_cpu_count(),
metavar="N",
help="Convert using N multiple processes (default: %(default)s)",
)
output_parser = parser.add_mutually_exclusive_group()
output_parser.add_argument(
"-o",
"--output-file",
default=None,
metavar="OUTPUT",
help=(
"output filename for the converted UFO. By default fonts are "
"modified in place. This only works with a single input."
),
)
output_parser.add_argument(
"-d",
"--output-dir",
default=None,
metavar="DIRECTORY",
help="output directory where to save converted UFOs",
)
options = parser.parse_args(args)
if ufo_module is None:
parser.error("Either ufoLib2 or defcon are required to run this script.")
if not options.verbose:
level = "WARNING"
elif options.verbose == 1:
level = "INFO"
else:
level = "DEBUG"
logging.basicConfig(level=level)
if len(options.infiles) > 1 and options.output_file:
parser.error("-o/--output-file can't be used with multile inputs")
if options.output_dir:
output_dir = options.output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
elif not os.path.isdir(output_dir):
parser.error("'%s' is not a directory" % output_dir)
output_paths = [
os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
]
elif options.output_file:
output_paths = [options.output_file]
else:
# save in-place
output_paths = [None] * len(options.infiles)
kwargs = dict(
dump_stats=options.verbose > 0,
max_err_em=options.conversion_error,
reverse_direction=options.reverse_direction,
all_quadratic=False if options.mixed else True,
)
if options.interpolatable:
logger.info("Converting curves compatibly")
ufos = [open_ufo(infile) for infile in options.infiles]
if fonts_to_quadratic(ufos, **kwargs):
for ufo, output_path in zip(ufos, output_paths):
logger.info("Saving %s", output_path)
if output_path:
ufo.save(output_path)
else:
ufo.save()
else:
for input_path, output_path in zip(options.infiles, output_paths):
if output_path:
_copytree(input_path, output_path)
else:
jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
if jobs > 1:
func = partial(_font_to_quadratic, **kwargs)
logger.info("Running %d parallel processes", jobs)
with closing(mp.Pool(jobs)) as pool:
pool.starmap(func, zip(options.infiles, output_paths))
else:
for input_path, output_path in zip(options.infiles, output_paths):
_font_to_quadratic(input_path, output_path, **kwargs)
PK aZZZl��9R@ R@ fontTools/cu2qu/cu2qu.py# cython: language_level=3
# distutils: define_macros=CYTHON_TRACE_NOGIL=1
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import cython
COMPILED = cython.compiled
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
COMPILED = False
import math
from .errors import Error as Cu2QuError, ApproxNotFoundError
__all__ = ["curve_to_quadratic", "curves_to_quadratic"]
MAX_N = 100
NAN = float("NaN")
@cython.cfunc
@cython.inline
@cython.returns(cython.double)
@cython.locals(v1=cython.complex, v2=cython.complex)
def dot(v1, v2):
"""Return the dot product of two vectors.
Args:
v1 (complex): First vector.
v2 (complex): Second vector.
Returns:
double: Dot product.
"""
return (v1 * v2.conjugate()).real
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
def calc_cubic_points(a, b, c, d):
_1 = d
_2 = (c / 3.0) + d
_3 = (b + c) / 3.0 + _2
_4 = a + d + c + b
return _1, _2, _3, _4
@cython.cfunc
@cython.inline
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
def calc_cubic_parameters(p0, p1, p2, p3):
c = (p1 - p0) * 3.0
b = (p2 - p1) * 3.0 - c
d = p0
a = p3 - d - c - b
return a, b, c, d
@cython.cfunc
@cython.inline
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
"""Split a cubic Bezier into n equal parts.
Splits the curve into `n` equal parts by curve time.
(t=0..1/n, t=1/n..2/n, ...)
Args:
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
Returns:
An iterator yielding the control points (four complex values) of the
subcurves.
"""
# Hand-coded special-cases
if n == 2:
return iter(split_cubic_into_two(p0, p1, p2, p3))
if n == 3:
return iter(split_cubic_into_three(p0, p1, p2, p3))
if n == 4:
a, b = split_cubic_into_two(p0, p1, p2, p3)
return iter(
split_cubic_into_two(a[0], a[1], a[2], a[3])
+ split_cubic_into_two(b[0], b[1], b[2], b[3])
)
if n == 6:
a, b = split_cubic_into_two(p0, p1, p2, p3)
return iter(
split_cubic_into_three(a[0], a[1], a[2], a[3])
+ split_cubic_into_three(b[0], b[1], b[2], b[3])
)
return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
dt = 1 / n
delta_2 = dt * dt
delta_3 = dt * delta_2
for i in range(n):
t1 = i * dt
t1_2 = t1 * t1
# calc new a, b, c and d
a1 = a * delta_3
b1 = (3 * a * t1 + b) * delta_2
c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt
d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d
yield calc_cubic_points(a1, b1, c1, d1)
@cython.cfunc
@cython.inline
@cython.locals(
p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3):
"""Split a cubic Bezier into two equal parts.
Splits the curve into two equal parts at t = 0.5
Args:
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
Returns:
tuple: Two cubic Beziers (each expressed as a tuple of four complex
values).
"""
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
deriv3 = (p3 + p2 - p1 - p0) * 0.125
return (
(p0, (p0 + p1) * 0.5, mid - deriv3, mid),
(mid, mid + deriv3, (p2 + p3) * 0.5, p3),
)
@cython.cfunc
@cython.inline
@cython.locals(
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(
mid1=cython.complex,
deriv1=cython.complex,
mid2=cython.complex,
deriv2=cython.complex,
)
def split_cubic_into_three(p0, p1, p2, p3):
"""Split a cubic Bezier into three equal parts.
Splits the curve into three equal parts at t = 1/3 and t = 2/3
Args:
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
Returns:
tuple: Three cubic Beziers (each expressed as a tuple of four complex
values).
"""
mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27)
deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27)
mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27)
deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27)
return (
(p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
(mid1, mid1 + deriv1, mid2 - deriv2, mid2),
(mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
)
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(
t=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
"""Approximate a cubic Bezier using a quadratic one.
Args:
t (double): Position of control point.
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
Returns:
complex: Location of candidate control point on quadratic curve.
"""
_p1 = p0 + (p1 - p0) * 1.5
_p2 = p3 + (p2 - p3) * 1.5
return _p1 + (_p2 - _p1) * t
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double)
def calc_intersect(a, b, c, d):
"""Calculate the intersection of two lines.
Args:
a (complex): Start point of first line.
b (complex): End point of first line.
c (complex): Start point of second line.
d (complex): End point of second line.
Returns:
complex: Location of intersection if one present, ``complex(NaN,NaN)``
if no intersection was found.
"""
ab = b - a
cd = d - c
p = ab * 1j
try:
h = dot(p, a - c) / dot(p, cd)
except ZeroDivisionError:
return complex(NAN, NAN)
return c + cd * h
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
tolerance=cython.double,
p0=cython.complex,
p1=cython.complex,
p2=cython.complex,
p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
"""Check if a cubic Bezier lies within a given distance of the origin.
"Origin" means *the* origin (0,0), not the start of the curve. Note that no
checks are made on the start and end positions of the curve; this function
only checks the inside of the curve.
Args:
p0 (complex): Start point of curve.
p1 (complex): First handle of curve.
p2 (complex): Second handle of curve.
p3 (complex): End point of curve.
tolerance (double): Distance from origin.
Returns:
bool: True if the cubic Bezier ``p`` entirely lies within a distance
``tolerance`` of the origin, False otherwise.
"""
# First check p2 then p1, as p2 has higher error early on.
if abs(p2) <= tolerance and abs(p1) <= tolerance:
return True
# Split.
mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
if abs(mid) > tolerance:
return False
deriv3 = (p3 + p2 - p1 - p0) * 0.125
return cubic_farthest_fit_inside(
p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
@cython.cfunc
@cython.inline
@cython.locals(tolerance=cython.double)
@cython.locals(
q1=cython.complex,
c0=cython.complex,
c1=cython.complex,
c2=cython.complex,
c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance):
"""Approximate a cubic Bezier with a single quadratic within a given tolerance.
Args:
cubic (sequence): Four complex numbers representing control points of
the cubic Bezier curve.
tolerance (double): Permitted deviation from the original curve.
Returns:
Three complex numbers representing control points of the quadratic
curve if it fits within the given tolerance, or ``None`` if no suitable
curve could be calculated.
"""
q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3])
if math.isnan(q1.imag):
return None
c0 = cubic[0]
c3 = cubic[3]
c1 = c0 + (q1 - c0) * (2 / 3)
c2 = c3 + (q1 - c3) * (2 / 3)
if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
return None
return c0, q1, c3
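# Hedged sketch of the degree-elevation identity the check above relies on: a
# quadratic (q0, q1, q2) is exactly the cubic with handles q0 + (q1 - q0) * 2/3 and
# q2 + (q1 - q2) * 2/3, so comparing those handles with the original cubic's handles
# bounds the approximation error. The helper below is hypothetical.
def _demo_elevate_quadratic(q0=0j, q1=1 + 2j, q2=2 + 0j):
    c1 = q0 + (q1 - q0) * (2 / 3)
    c2 = q2 + (q1 - q2) * (2 / 3)
    return q0, c1, c2, q2  # cubic with the same shape as the quadratic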
@cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double)
@cython.locals(i=cython.int)
@cython.locals(all_quadratic=cython.int)
@cython.locals(
c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
)
@cython.locals(
q0=cython.complex,
q1=cython.complex,
next_q1=cython.complex,
q2=cython.complex,
d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, all_quadratic):
"""Approximate a cubic Bezier curve with a spline of n quadratics.
Args:
cubic (sequence): Four complex numbers representing control points of
the cubic Bezier curve.
n (int): Number of quadratic Bezier curves in the spline.
tolerance (double): Permitted deviation from the original curve.
Returns:
A list of ``n+2`` complex numbers, representing control points of the
quadratic spline if it fits within the given tolerance, or ``None`` if
no suitable spline could be calculated.
"""
if n == 1:
return cubic_approx_quadratic(cubic, tolerance)
if n == 2 and not all_quadratic:
return cubic
cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n)
# calculate the spline of quadratics and check errors at the same time.
next_cubic = next(cubics)
next_q1 = cubic_approx_control(
0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
)
q2 = cubic[0]
d1 = 0j
spline = [cubic[0], next_q1]
for i in range(1, n + 1):
# Current cubic to convert
c0, c1, c2, c3 = next_cubic
# Current quadratic approximation of current cubic
q0 = q2
q1 = next_q1
if i < n:
next_cubic = next(cubics)
next_q1 = cubic_approx_control(
i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
)
spline.append(next_q1)
q2 = (q1 + next_q1) * 0.5
else:
q2 = c3
# End-point deltas
d0 = d1
d1 = q2 - c3
if abs(d1) > tolerance or not cubic_farthest_fit_inside(
d0,
q0 + (q1 - q0) * (2 / 3) - c1,
q2 + (q1 - q2) * (2 / 3) - c2,
d1,
tolerance,
):
return None
spline.append(cubic[3])
return spline
@cython.locals(max_err=cython.double)
@cython.locals(n=cython.int)
@cython.locals(all_quadratic=cython.int)
def curve_to_quadratic(curve, max_err, all_quadratic=True):
"""Approximate a cubic Bezier curve with a spline of n quadratics.
Args:
curve (sequence): Four 2D tuples representing control points of
the cubic Bezier curve.
max_err (double): Permitted deviation from the original curve.
all_quadratic (bool): If True (default) returned value is a
quadratic spline. If False, it's either a single quadratic
curve or a single cubic curve.
Returns:
If all_quadratic is True: A list of 2D tuples, representing
control points of the quadratic spline if it fits within the
given tolerance, or ``None`` if no suitable spline could be
calculated.
If all_quadratic is False: Either a quadratic curve (if length
of output is 3), or a cubic curve (if length of output is 4).
"""
curve = [complex(*p) for p in curve]
for n in range(1, MAX_N + 1):
spline = cubic_approx_spline(curve, n, max_err, all_quadratic)
if spline is not None:
# done. go home
return [(s.real, s.imag) for s in spline]
raise ApproxNotFoundError(curve)
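# Hedged usage sketch (hypothetical helper): convert one cubic, given as four 2D
# points, with a 1-unit tolerance.
def _demo_curve_to_quadratic():
    cubic = [(50, 50), (100, 100), (150, 100), (200, 50)]
    # Expected (per the curves_to_quadratic doctest below for the same input):
    # [(50.0, 50.0), (75.0, 75.0), (125.0, 91.66...), (175.0, 75.0), (200.0, 50.0)]
    return curve_to_quadratic(cubic, 1.0)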
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
@cython.locals(all_quadratic=cython.int)
def curves_to_quadratic(curves, max_errors, all_quadratic=True):
"""Return quadratic Bezier splines approximating the input cubic Beziers.
Args:
curves: A sequence of *n* curves, each curve being a sequence of four
2D tuples.
max_errors: A sequence of *n* floats representing the maximum permissible
deviation from each of the cubic Bezier curves.
all_quadratic (bool): If True (default) returned values are a
quadratic spline. If False, they are either a single quadratic
curve or a single cubic curve.
Example::
>>> curves_to_quadratic( [
... [ (50,50), (100,100), (150,100), (200,50) ],
... [ (75,50), (120,100), (150,75), (200,60) ]
... ], [1,1] )
[[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]]
The returned splines have "implied oncurve points" suitable for use in
TrueType ``glyf`` outlines - i.e. in the first spline returned above,
the first quadratic segment runs from (50,50) to
( (75 + 125)/2 , (75 + 91.666..)/2 ) = (100, 83.333...).
Returns:
If all_quadratic is True, a list of splines, each spline being a list
of 2D tuples.
If all_quadratic is False, a list of curves, each curve being a quadratic
(length 3), or cubic (length 4).
Raises:
fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation
can be found for all curves with the given parameters.
"""
curves = [[complex(*p) for p in curve] for curve in curves]
assert len(max_errors) == len(curves)
l = len(curves)
splines = [None] * l
last_i = i = 0
n = 1
while True:
spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
if spline is None:
if n == MAX_N:
break
n += 1
last_i = i
continue
splines[i] = spline
i = (i + 1) % l
if i == last_i:
# done. go home
return [[(s.real, s.imag) for s in spline] for spline in splines]
raise ApproxNotFoundError(curves)
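# Hedged arithmetic check of the "implied oncurve point" note above: in a quadratic
# spline the on-curve point between two consecutive off-curve points is their
# midpoint; for the first spline in the doctest that midpoint is (100, 83.333...).
def _demo_implied_oncurve(p=(75.0, 75.0), q=(125.0, 91.66666666666666)):
    return ((p[0] + q[0]) / 2, (p[1] + q[1]) / 2)  # -> (100.0, 83.333...)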
PK aZZZ�� � fontTools/cu2qu/errors.py# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Error(Exception):
"""Base Cu2Qu exception class for all other errors."""
class ApproxNotFoundError(Error):
def __init__(self, curve):
message = "no approximation found: %s" % curve
super().__init__(message)
self.curve = curve
class UnequalZipLengthsError(Error):
pass
class IncompatibleGlyphsError(Error):
def __init__(self, glyphs):
assert len(glyphs) > 1
self.glyphs = glyphs
names = set(repr(g.name) for g in glyphs)
if len(names) > 1:
self.combined_name = "{%s}" % ", ".join(sorted(names))
else:
self.combined_name = names.pop()
def __repr__(self):
return "<%s %s>" % (type(self).__name__, self.combined_name)
class IncompatibleSegmentNumberError(IncompatibleGlyphsError):
def __str__(self):
return "Glyphs named %s have different number of segments" % (
self.combined_name
)
class IncompatibleSegmentTypesError(IncompatibleGlyphsError):
def __init__(self, glyphs, segments):
IncompatibleGlyphsError.__init__(self, glyphs)
self.segments = segments
def __str__(self):
lines = []
ndigits = len(str(max(self.segments)))
for i, tags in sorted(self.segments.items()):
lines.append(
"%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags))
)
return "Glyphs named %s have incompatible segment types:\n %s" % (
self.combined_name,
"\n ".join(lines),
)
class IncompatibleFontsError(Error):
def __init__(self, glyph_errors):
self.glyph_errors = glyph_errors
def __str__(self):
return "fonts contains incompatible glyphs: %s" % (
", ".join(repr(g) for g in sorted(self.glyph_errors.keys()))
)
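# Hedged usage sketch (hypothetical helper): ApproxNotFoundError keeps the offending
# curve(s) on the exception instance, so callers can report which input failed.
def _demo_approx_not_found():
    try:
        raise ApproxNotFoundError([(0, 0), (1, 1), (2, 1), (3, 0)])
    except ApproxNotFoundError as exc:
        return exc.curve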
PK aZZZ<T�. . fontTools/cu2qu/ufo.py# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts cubic bezier curves to quadratic splines.
Conversion is performed such that the quadratic splines keep the same end-curve
tangents as the original cubics. The approach is iterative, increasing the
number of segments for a spline until the error gets below a bound.
Respective curves from multiple fonts will be converted at once to ensure that
the resulting splines are interpolation-compatible.
"""
import logging
from fontTools.pens.basePen import AbstractPen
from fontTools.pens.pointPen import PointToSegmentPen
from fontTools.pens.reverseContourPen import ReverseContourPen
from . import curves_to_quadratic
from .errors import (
UnequalZipLengthsError,
IncompatibleSegmentNumberError,
IncompatibleSegmentTypesError,
IncompatibleGlyphsError,
IncompatibleFontsError,
)
__all__ = ["fonts_to_quadratic", "font_to_quadratic"]
# The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM
# (see fonts_to_quadratic).
DEFAULT_MAX_ERR = 0.001
CURVE_TYPE_LIB_KEY = "com.github.googlei18n.cu2qu.curve_type"
logger = logging.getLogger(__name__)
_zip = zip
def zip(*args):
"""Ensure each argument to zip has the same length. Also make sure a list is
returned for python 2/3 compatibility.
"""
if len(set(len(a) for a in args)) != 1:
raise UnequalZipLengthsError(*args)
return list(_zip(*args))
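# Hedged sketch of the strict-length behaviour above (hypothetical helper): unlike
# the builtin, this zip() raises instead of silently truncating.
def _demo_strict_zip():
    assert zip([1, 2], ["a", "b"]) == [(1, "a"), (2, "b")]
    try:
        zip([1, 2], ["a"])
    except UnequalZipLengthsError:
        return True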
class GetSegmentsPen(AbstractPen):
"""Pen to collect segments into lists of points for conversion.
Curves always include their initial on-curve point, so some points are
duplicated between segments.
"""
def __init__(self):
self._last_pt = None
self.segments = []
def _add_segment(self, tag, *args):
if tag in ["move", "line", "qcurve", "curve"]:
self._last_pt = args[-1]
self.segments.append((tag, args))
def moveTo(self, pt):
self._add_segment("move", pt)
def lineTo(self, pt):
self._add_segment("line", pt)
def qCurveTo(self, *points):
self._add_segment("qcurve", self._last_pt, *points)
def curveTo(self, *points):
self._add_segment("curve", self._last_pt, *points)
def closePath(self):
self._add_segment("close")
def endPath(self):
self._add_segment("end")
def addComponent(self, glyphName, transformation):
pass
def _get_segments(glyph):
"""Get a glyph's segments as extracted by GetSegmentsPen."""
pen = GetSegmentsPen()
# glyph.draw(pen)
# We can't simply draw the glyph with the pen; instead we must initialize the
# PointToSegmentPen explicitly with outputImpliedClosingLine=True.
# By default PointToSegmentPen does not outputImpliedClosingLine -- unless
# last and first point on closed contour are duplicated. Because we are
# converting multiple glyphs at the same time, we want to make sure
# this function returns the same number of segments, whether or not
# the last and first point overlap.
# https://github.com/googlefonts/fontmake/issues/572
# https://github.com/fonttools/fonttools/pull/1720
pointPen = PointToSegmentPen(pen, outputImpliedClosingLine=True)
glyph.drawPoints(pointPen)
return pen.segments
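# Hedged sketch of what GetSegmentsPen collects for a simple contour, driving the
# segment-pen protocol directly instead of going through a glyph object; the helper
# below is hypothetical.
def _demo_get_segments_pen():
    pen = GetSegmentsPen()
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.curveTo((150, 50), (150, 150), (100, 200))
    pen.closePath()
    # -> [('move', ((0, 0),)), ('line', ((100, 0),)),
    #     ('curve', ((100, 0), (150, 50), (150, 150), (100, 200))), ('close', ())]
    return pen.segments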
def _set_segments(glyph, segments, reverse_direction):
"""Draw segments as extracted by GetSegmentsPen back to a glyph."""
glyph.clearContours()
pen = glyph.getPen()
if reverse_direction:
pen = ReverseContourPen(pen)
for tag, args in segments:
if tag == "move":
pen.moveTo(*args)
elif tag == "line":
pen.lineTo(*args)
elif tag == "curve":
pen.curveTo(*args[1:])
elif tag == "qcurve":
pen.qCurveTo(*args[1:])
elif tag == "close":
pen.closePath()
elif tag == "end":
pen.endPath()
else:
raise AssertionError('Unhandled segment type "%s"' % tag)
def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True):
"""Return quadratic approximations of cubic segments."""
assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"
new_points = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic)
n = len(new_points[0])
assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly"
spline_length = str(n - 2)
stats[spline_length] = stats.get(spline_length, 0) + 1
if all_quadratic or n == 3:
return [("qcurve", p) for p in new_points]
else:
return [("curve", p) for p in new_points]
def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True):
"""Do the actual conversion of a set of compatible glyphs, after arguments
have been set up.
Return True if the glyphs were modified, else return False.
"""
try:
segments_by_location = zip(*[_get_segments(g) for g in glyphs])
except UnequalZipLengthsError:
raise IncompatibleSegmentNumberError(glyphs)
if not any(segments_by_location):
return False
# always modify input glyphs if reverse_direction is True
glyphs_modified = reverse_direction
new_segments_by_location = []
incompatible = {}
for i, segments in enumerate(segments_by_location):
tag = segments[0][0]
if not all(s[0] == tag for s in segments[1:]):
incompatible[i] = [s[0] for s in segments]
elif tag == "curve":
new_segments = _segments_to_quadratic(
segments, max_err, stats, all_quadratic
)
if all_quadratic or new_segments != segments:
glyphs_modified = True
segments = new_segments
new_segments_by_location.append(segments)
if glyphs_modified:
new_segments_by_glyph = zip(*new_segments_by_location)
for glyph, new_segments in zip(glyphs, new_segments_by_glyph):
_set_segments(glyph, new_segments, reverse_direction)
if incompatible:
raise IncompatibleSegmentTypesError(glyphs, segments=incompatible)
return glyphs_modified
def glyphs_to_quadratic(
glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True
):
"""Convert the curves of a set of compatible of glyphs to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
compatibility. If this is not required, calling glyphs_to_quadratic with one
glyph at a time may yield slightly more optimized results.
Return True if glyphs were modified, else return False.
Raises IncompatibleGlyphsError if glyphs have non-interpolatable outlines.
"""
if stats is None:
stats = {}
if not max_err:
# assume 1000 is the default UPEM
max_err = DEFAULT_MAX_ERR * 1000
if isinstance(max_err, (list, tuple)):
max_errors = max_err
else:
max_errors = [max_err] * len(glyphs)
assert len(max_errors) == len(glyphs)
return _glyphs_to_quadratic(
glyphs, max_errors, reverse_direction, stats, all_quadratic
)
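# Hedged usage sketch, assuming `glyph_a` and `glyph_b` are interpolation-compatible
# glyph objects (e.g. from defcon or ufoLib2); converting them together keeps the
# resulting quadratic splines compatible. The helper below is hypothetical.
def _demo_glyphs_to_quadratic(glyph_a, glyph_b):
    stats = {}
    modified = glyphs_to_quadratic([glyph_a, glyph_b], max_err=1.0, stats=stats)
    return modified, stats  # e.g. (True, {"3": 4, "5": 2})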
def fonts_to_quadratic(
fonts,
max_err_em=None,
max_err=None,
reverse_direction=False,
stats=None,
dump_stats=False,
remember_curve_type=True,
all_quadratic=True,
):
"""Convert the curves of a collection of fonts to quadratic.
All curves will be converted to quadratic at once, ensuring interpolation
compatibility. If this is not required, calling fonts_to_quadratic with one
font at a time may yield slightly more optimized results.
Return the set of modified glyph names if any, else return an empty set
(or ``False`` when the stored curve type shows the fonts are already quadratic).
By default, cu2qu stores the curve type in the fonts' lib, under a private
key "com.github.googlei18n.cu2qu.curve_type", and will not try to convert
them again if the curve type is already set to "quadratic".
Setting 'remember_curve_type' to False disables this optimization.
Raises IncompatibleFontsError if same-named glyphs from different fonts
have non-interpolatable outlines.
"""
if remember_curve_type:
curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts}
if len(curve_types) == 1:
curve_type = next(iter(curve_types))
if curve_type in ("quadratic", "mixed"):
logger.info("Curves already converted to quadratic")
return False
elif curve_type == "cubic":
pass # keep converting
else:
raise NotImplementedError(curve_type)
elif len(curve_types) > 1:
# going to crash later if they do differ
logger.warning("fonts may contain different curve types")
if stats is None:
stats = {}
if max_err_em and max_err:
raise TypeError("Only one of max_err and max_err_em can be specified.")
if not (max_err_em or max_err):
max_err_em = DEFAULT_MAX_ERR
if isinstance(max_err, (list, tuple)):
assert len(max_err) == len(fonts)
max_errors = max_err
elif max_err:
max_errors = [max_err] * len(fonts)
if isinstance(max_err_em, (list, tuple)):
assert len(fonts) == len(max_err_em)
max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
elif max_err_em:
max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]
modified = set()
glyph_errors = {}
for name in set().union(*(f.keys() for f in fonts)):
glyphs = []
cur_max_errors = []
for font, error in zip(fonts, max_errors):
if name in font:
glyphs.append(font[name])
cur_max_errors.append(error)
try:
if _glyphs_to_quadratic(
glyphs, cur_max_errors, reverse_direction, stats, all_quadratic
):
modified.add(name)
except IncompatibleGlyphsError as exc:
logger.error(exc)
glyph_errors[name] = exc
if glyph_errors:
raise IncompatibleFontsError(glyph_errors)
if modified and dump_stats:
spline_lengths = sorted(stats.keys())
logger.info(
"New spline lengths: %s"
% (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
)
if remember_curve_type:
for font in fonts:
curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
new_curve_type = "quadratic" if all_quadratic else "mixed"
if curve_type != new_curve_type:
font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type
return modified
def glyph_to_quadratic(glyph, **kwargs):
"""Convenience wrapper around glyphs_to_quadratic, for just one glyph.
Return True if the glyph was modified, else return False.
"""
return glyphs_to_quadratic([glyph], **kwargs)
def font_to_quadratic(font, **kwargs):
"""Convenience wrapper around fonts_to_quadratic, for just one font.
Return the set of modified glyph names if any, else return empty set.
"""
return fonts_to_quadratic([font], **kwargs)
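# Hedged usage sketch, assuming ufoLib2 is available; the UFO paths are hypothetical.
def _demo_fonts_to_quadratic():
    import ufoLib2
    fonts = [ufoLib2.Font.open(p) for p in ("Master-Light.ufo", "Master-Bold.ufo")]
    modified = fonts_to_quadratic(fonts, max_err_em=0.001, reverse_direction=True)
    return modified  # set of glyph names whose outlines were converted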
PK aZZZ�vl��� �� $ fontTools/designspaceLib/__init__.pyfrom __future__ import annotations
import collections
import copy
import itertools
import math
import os
import posixpath
from io import BytesIO, StringIO
from textwrap import indent
from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union, cast
from fontTools.misc import etree as ET
from fontTools.misc import plistlib
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.textTools import tobytes, tostr
"""
designSpaceDocument
- read and write designspace files
"""
__all__ = [
"AxisDescriptor",
"AxisLabelDescriptor",
"AxisMappingDescriptor",
"BaseDocReader",
"BaseDocWriter",
"DesignSpaceDocument",
"DesignSpaceDocumentError",
"DiscreteAxisDescriptor",
"InstanceDescriptor",
"LocationLabelDescriptor",
"RangeAxisSubsetDescriptor",
"RuleDescriptor",
"SourceDescriptor",
"ValueAxisSubsetDescriptor",
"VariableFontDescriptor",
]
# ElementTree allows finding namespace-prefixed elements, but not attributes,
# so we have to do it ourselves for 'xml:lang'
XML_NS = "{http://www.w3.org/XML/1998/namespace}"
XML_LANG = XML_NS + "lang"
def posix(path):
"""Normalize paths using forward slash to work also on Windows."""
new_path = posixpath.join(*path.split(os.path.sep))
if path.startswith("/"):
# The above transformation loses absolute paths
new_path = "/" + new_path
elif path.startswith(r"\\"):
# The above transformation loses leading slashes of UNC path mounts
new_path = "//" + new_path
return new_path
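# Hedged sketch of the normalization above (hypothetical helper): on Windows,
# os.path.sep is "\\", so "sources\\Light.ufo" becomes "sources/Light.ufo"; on POSIX
# systems the input already uses "/" and is returned unchanged.
def _demo_posix_paths():
    return posix(os.path.join("sources", "Light.ufo"))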
def posixpath_property(private_name):
"""Generate a propery that holds a path always using forward slashes."""
def getter(self):
# Normal getter
return getattr(self, private_name)
def setter(self, value):
# The setter rewrites paths using forward slashes
if value is not None:
value = posix(value)
setattr(self, private_name, value)
return property(getter, setter)
class DesignSpaceDocumentError(Exception):
def __init__(self, msg, obj=None):
self.msg = msg
self.obj = obj
def __str__(self):
return str(self.msg) + (": %r" % self.obj if self.obj is not None else "")
class AsDictMixin(object):
def asdict(self):
d = {}
for attr, value in self.__dict__.items():
if attr.startswith("_"):
continue
if hasattr(value, "asdict"):
value = value.asdict()
elif isinstance(value, list):
value = [v.asdict() if hasattr(v, "asdict") else v for v in value]
d[attr] = value
return d
class SimpleDescriptor(AsDictMixin):
"""Containers for a bunch of attributes"""
# XXX this is ugly. The 'print' is inappropriate here, and instead of
# assert, it should simply return True/False
def compare(self, other):
# test if this object contains the same data as the other
for attr in self._attrs:
try:
assert getattr(self, attr) == getattr(other, attr)
except AssertionError:
print(
"failed attribute",
attr,
getattr(self, attr),
"!=",
getattr(other, attr),
)
def __repr__(self):
attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs]
attrs = indent("\n".join(attrs), " ")
return f"{self.__class__.__name__}(\n{attrs}\n)"
class SourceDescriptor(SimpleDescriptor):
"""Simple container for data related to the source
.. code:: python
doc = DesignSpaceDocument()
s1 = SourceDescriptor()
s1.path = masterPath1
s1.name = "master.ufo1"
s1.font = defcon.Font("master.ufo1")
s1.location = dict(weight=0)
s1.familyName = "MasterFamilyName"
s1.styleName = "MasterStyleNameOne"
s1.localisedFamilyName = dict(fr="Caractère")
s1.mutedGlyphNames.append("A")
s1.mutedGlyphNames.append("Z")
doc.addSource(s1)
"""
flavor = "source"
_attrs = [
"filename",
"path",
"name",
"layerName",
"location",
"copyLib",
"copyGroups",
"copyFeatures",
"muteKerning",
"muteInfo",
"mutedGlyphNames",
"familyName",
"styleName",
"localisedFamilyName",
]
filename = posixpath_property("_filename")
path = posixpath_property("_path")
def __init__(
self,
*,
filename=None,
path=None,
font=None,
name=None,
location=None,
designLocation=None,
layerName=None,
familyName=None,
styleName=None,
localisedFamilyName=None,
copyLib=False,
copyInfo=False,
copyGroups=False,
copyFeatures=False,
muteKerning=False,
muteInfo=False,
mutedGlyphNames=None,
):
self.filename = filename
"""string. A relative path to the source file, **as it is in the document**.
MutatorMath + VarLib.
"""
self.path = path
"""The absolute path, calculated from filename."""
self.font = font
"""Any Python object. Optional. Points to a representation of this
source font that is loaded in memory, as a Python object (e.g. a
``defcon.Font`` or a ``fontTools.ttLib.TTFont``).
The default document reader will not fill-in this attribute, and the
default writer will not use this attribute. It is up to the user of
``designspaceLib`` to either load the resource identified by
``filename`` and store it in this field, or write the contents of
this field to the disk and make ``filename`` point to that.
"""
self.name = name
"""string. Optional. Unique identifier name for this source.
MutatorMath + varLib.
"""
self.designLocation = (
designLocation if designLocation is not None else location or {}
)
"""dict. Axis values for this source, in design space coordinates.
MutatorMath + varLib.
This may be only part of the full design location.
See :meth:`getFullDesignLocation()`
.. versionadded:: 5.0
"""
self.layerName = layerName
"""string. The name of the layer in the source to look for
outline data. Default ``None`` which means ``foreground``.
"""
self.familyName = familyName
"""string. Family name of this source. Though this data
can be extracted from the font, it can be efficient to have it right
here.
varLib.
"""
self.styleName = styleName
"""string. Style name of this source. Though this data
can be extracted from the font, it can be efficient to have it right
here.
varLib.
"""
self.localisedFamilyName = localisedFamilyName or {}
"""dict. A dictionary of localised family name strings, keyed by
language code.
If present, will be used to build localized names for all instances.
.. versionadded:: 5.0
"""
self.copyLib = copyLib
"""bool. Indicates if the contents of the font.lib need to
be copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyInfo = copyInfo
"""bool. Indicates if the non-interpolating font.info needs
to be copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyGroups = copyGroups
"""bool. Indicates if the groups need to be copied to the
instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyFeatures = copyFeatures
"""bool. Indicates if the feature text needs to be
copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.muteKerning = muteKerning
"""bool. Indicates if the kerning data from this source
needs to be muted (i.e. not be part of the calculations).
MutatorMath only.
"""
self.muteInfo = muteInfo
"""bool. Indicated if the interpolating font.info data for
this source needs to be muted.
MutatorMath only.
"""
self.mutedGlyphNames = mutedGlyphNames or []
"""list. Glyphnames that need to be muted in the
instances.
MutatorMath only.
"""
@property
def location(self):
"""dict. Axis values for this source, in design space coordinates.
MutatorMath + varLib.
.. deprecated:: 5.0
Use the more explicit alias for this property :attr:`designLocation`.
"""
return self.designLocation
@location.setter
def location(self, location: Optional[SimpleLocationDict]):
self.designLocation = location or {}
def setFamilyName(self, familyName, languageCode="en"):
"""Setter for :attr:`localisedFamilyName`
.. versionadded:: 5.0
"""
self.localisedFamilyName[languageCode] = tostr(familyName)
def getFamilyName(self, languageCode="en"):
"""Getter for :attr:`localisedFamilyName`
.. versionadded:: 5.0
"""
return self.localisedFamilyName.get(languageCode)
def getFullDesignLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
"""Get the complete design location of this source, from its
:attr:`designLocation` and the document's axis defaults.
.. versionadded:: 5.0
"""
result: SimpleLocationDict = {}
for axis in doc.axes:
if axis.name in self.designLocation:
result[axis.name] = self.designLocation[axis.name]
else:
result[axis.name] = axis.map_forward(axis.default)
return result
class RuleDescriptor(SimpleDescriptor):
"""Represents the rule descriptor element: a set of glyph substitutions to
trigger conditionally in some parts of the designspace.
.. code:: python
r1 = RuleDescriptor()
r1.name = "unique.rule.name"
r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
r1.conditionSets.append([dict(...), dict(...)])
r1.subs.append(("a", "a.alt"))
.. code:: xml
<!-- optional: list of substitution rules -->
<rules>
<rule name="vertical.bars">
<conditionset>
<condition minimum="250.000000" maximum="750.000000" name="weight"/>
<condition minimum="100" name="width"/>
<condition minimum="10" maximum="40" name="optical"/>
</conditionset>
<sub name="cent" with="cent.alt"/>
<sub name="dollar" with="dollar.alt"/>
</rule>
</rules>
"""
_attrs = ["name", "conditionSets", "subs"] # what do we need here
def __init__(self, *, name=None, conditionSets=None, subs=None):
self.name = name
"""string. Unique name for this rule. Can be used to reference this rule data."""
# list of lists of dict(name='aaaa', minimum=0, maximum=1000)
self.conditionSets = conditionSets or []
"""a list of conditionsets.
- Each conditionset is a list of conditions.
- Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
"""
# list of substitutions stored as tuples of glyphnames ("a", "a.alt")
self.subs = subs or []
"""list of substitutions.
- Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt").
- Note: By default, rules are applied first, before other text
shaping/OpenType layout, as they are part of the
`Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_.
See :ref:`rules-element` § Attributes.
"""
def evaluateRule(rule, location):
"""Return True if any of the rule's conditionsets matches the given location."""
return any(evaluateConditions(c, location) for c in rule.conditionSets)
def evaluateConditions(conditions, location):
"""Return True if all the conditions matches the given location.
- If a condition has no minimum, check for < maximum.
- If a condition has no maximum, check for > minimum.
"""
for cd in conditions:
value = location[cd["name"]]
if cd.get("minimum") is None:
if value > cd["maximum"]:
return False
elif cd.get("maximum") is None:
if cd["minimum"] > value:
return False
elif not cd["minimum"] <= value <= cd["maximum"]:
return False
return True
def processRules(rules, location, glyphNames):
"""Apply these rules at this location to these glyphnames.
Return a new list of glyphNames with substitutions applied.
- rule order matters
"""
newNames = []
for rule in rules:
if evaluateRule(rule, location):
for name in glyphNames:
swap = False
for a, b in rule.subs:
if name == a:
swap = True
break
if swap:
newNames.append(b)
else:
newNames.append(name)
glyphNames = newNames
newNames = []
return glyphNames
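# Hedged usage sketch of the rule helpers above (hypothetical helper, arbitrary
# demo values).
def _demo_process_rules():
    rule = RuleDescriptor(
        name="demo.rule",
        conditionSets=[[dict(name="weight", minimum=500, maximum=1000)]],
        subs=[("dollar", "dollar.alt")],
    )
    assert evaluateRule(rule, {"weight": 700})
    # -> ["a", "dollar.alt", "cent"]
    return processRules([rule], {"weight": 700}, ["a", "dollar", "cent"])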
AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]
SimpleLocationDict = Dict[str, float]
class AxisMappingDescriptor(SimpleDescriptor):
"""Represents the axis mapping element: mapping an input location
to an output location in the designspace.
.. code:: python
m1 = AxisMappingDescriptor()
m1.inputLocation = {"weight": 900, "width": 150}
m1.outputLocation = {"weight": 870}
.. code:: xml
<mappings>
<mapping>
<input>
<dimension name="weight" xvalue="900"/>
<dimension name="width" xvalue="150"/>
</input>
<output>
<dimension name="weight" xvalue="870"/>
</output>
</mapping>
</mappings>
"""
_attrs = ["inputLocation", "outputLocation"]
def __init__(
self,
*,
inputLocation=None,
outputLocation=None,
description=None,
groupDescription=None,
):
self.inputLocation: SimpleLocationDict = inputLocation or {}
"""dict. Axis values for the input of the mapping, in design space coordinates.
varLib.
.. versionadded:: 5.1
"""
self.outputLocation: SimpleLocationDict = outputLocation or {}
"""dict. Axis values for the output of the mapping, in design space coordinates.
varLib.
.. versionadded:: 5.1
"""
self.description = description
"""string. A description of the mapping.
varLib.
.. versionadded:: 5.2
"""
self.groupDescription = groupDescription
"""string. A description of the group of mappings.
varLib.
.. versionadded:: 5.2
"""
class InstanceDescriptor(SimpleDescriptor):
"""Simple container for data related to the instance
.. code:: python
i2 = InstanceDescriptor()
i2.path = instancePath2
i2.familyName = "InstanceFamilyName"
i2.styleName = "InstanceStyleName"
i2.name = "instance.ufo2"
# anisotropic location
i2.designLocation = dict(weight=500, width=(400,300))
i2.postScriptFontName = "InstancePostscriptName"
i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
i2.styleMapStyleName = "InstanceStyleMapStyleName"
i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
doc.addInstance(i2)
"""
flavor = "instance"
_defaultLanguageCode = "en"
_attrs = [
"filename",
"path",
"name",
"locationLabel",
"designLocation",
"userLocation",
"familyName",
"styleName",
"postScriptFontName",
"styleMapFamilyName",
"styleMapStyleName",
"localisedFamilyName",
"localisedStyleName",
"localisedStyleMapFamilyName",
"localisedStyleMapStyleName",
"glyphs",
"kerning",
"info",
"lib",
]
filename = posixpath_property("_filename")
path = posixpath_property("_path")
def __init__(
self,
*,
filename=None,
path=None,
font=None,
name=None,
location=None,
locationLabel=None,
designLocation=None,
userLocation=None,
familyName=None,
styleName=None,
postScriptFontName=None,
styleMapFamilyName=None,
styleMapStyleName=None,
localisedFamilyName=None,
localisedStyleName=None,
localisedStyleMapFamilyName=None,
localisedStyleMapStyleName=None,
glyphs=None,
kerning=True,
info=True,
lib=None,
):
self.filename = filename
"""string. Relative path to the instance file, **as it is
in the document**. The file may or may not exist.
MutatorMath + VarLib.
"""
self.path = path
"""string. Absolute path to the instance file, calculated from
the document path and the string in the filename attr. The file may
or may not exist.
MutatorMath.
"""
self.font = font
"""Same as :attr:`SourceDescriptor.font`
.. seealso:: :attr:`SourceDescriptor.font`
"""
self.name = name
"""string. Unique identifier name of the instance, used to
identify it if it needs to be referenced from elsewhere in the
document.
"""
self.locationLabel = locationLabel
"""Name of a :class:`LocationLabelDescriptor`. If
provided, the instance should have the same location as the
LocationLabel.
.. seealso::
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.designLocation: AnisotropicLocationDict = (
designLocation if designLocation is not None else (location or {})
)
"""dict. Axis values for this instance, in design space coordinates.
MutatorMath + varLib.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.userLocation: SimpleLocationDict = userLocation or {}
"""dict. Axis values for this instance, in user space coordinates.
MutatorMath + varLib.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.familyName = familyName
"""string. Family name of this instance.
MutatorMath + varLib.
"""
self.styleName = styleName
"""string. Style name of this instance.
MutatorMath + varLib.
"""
self.postScriptFontName = postScriptFontName
"""string. Postscript fontname for this instance.
MutatorMath + varLib.
"""
self.styleMapFamilyName = styleMapFamilyName
"""string. StyleMap familyname for this instance.
MutatorMath + varLib.
"""
self.styleMapStyleName = styleMapStyleName
"""string. StyleMap stylename for this instance.
MutatorMath + varLib.
"""
self.localisedFamilyName = localisedFamilyName or {}
"""dict. A dictionary of localised family name
strings, keyed by language code.
"""
self.localisedStyleName = localisedStyleName or {}
"""dict. A dictionary of localised stylename
strings, keyed by language code.
"""
self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {}
"""A dictionary of localised style map
familyname strings, keyed by language code.
"""
self.localisedStyleMapStyleName = localisedStyleMapStyleName or {}
"""A dictionary of localised style map
stylename strings, keyed by language code.
"""
self.glyphs = glyphs or {}
"""dict for special master definitions for glyphs. If glyphs
need special masters (to record the results of executed rules for
example).
MutatorMath.
.. deprecated:: 5.0
Use rules or sparse sources instead.
"""
self.kerning = kerning
""" bool. Indicates if this instance needs its kerning
calculated.
MutatorMath.
.. deprecated:: 5.0
"""
self.info = info
"""bool. Indicated if this instance needs the interpolating
font.info calculated.
.. deprecated:: 5.0
"""
self.lib = lib or {}
"""Custom data associated with this instance."""
@property
def location(self):
"""dict. Axis values for this instance.
MutatorMath + varLib.
.. deprecated:: 5.0
Use the more explicit alias for this property :attr:`designLocation`.
"""
return self.designLocation
@location.setter
def location(self, location: Optional[AnisotropicLocationDict]):
self.designLocation = location or {}
def setStyleName(self, styleName, languageCode="en"):
"""These methods give easier access to the localised names."""
self.localisedStyleName[languageCode] = tostr(styleName)
def getStyleName(self, languageCode="en"):
return self.localisedStyleName.get(languageCode)
def setFamilyName(self, familyName, languageCode="en"):
self.localisedFamilyName[languageCode] = tostr(familyName)
def getFamilyName(self, languageCode="en"):
return self.localisedFamilyName.get(languageCode)
def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"):
self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName)
def getStyleMapStyleName(self, languageCode="en"):
return self.localisedStyleMapStyleName.get(languageCode)
def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"):
self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName)
def getStyleMapFamilyName(self, languageCode="en"):
return self.localisedStyleMapFamilyName.get(languageCode)
def clearLocation(self, axisName: Optional[str] = None):
"""Clear all location-related fields. Ensures that
:attr:`designLocation` and :attr:`userLocation` are dictionaries
(possibly empty if clearing everything).
In order to update the location of this instance wholesale, a user
should first clear all the fields, then change the field(s) for which
they have data.
.. code:: python
instance.clearLocation()
instance.designLocation = {'Weight': (34, 36.5), 'Width': 100}
instance.userLocation = {'Opsz': 16}
In order to update a single axis location, the user should only clear
that axis, then edit the values:
.. code:: python
instance.clearLocation('Weight')
instance.designLocation['Weight'] = (34, 36.5)
Args:
axisName: if provided, only clear the location for that axis.
.. versionadded:: 5.0
"""
self.locationLabel = None
if axisName is None:
self.designLocation = {}
self.userLocation = {}
else:
if self.designLocation is None:
self.designLocation = {}
if axisName in self.designLocation:
del self.designLocation[axisName]
if self.userLocation is None:
self.userLocation = {}
if axisName in self.userLocation:
del self.userLocation[axisName]
def getLocationLabelDescriptor(
self, doc: "DesignSpaceDocument"
) -> Optional[LocationLabelDescriptor]:
"""Get the :class:`LocationLabelDescriptor` instance that matches
this instance's :attr:`locationLabel`.
Raises if the named label can't be found.
.. versionadded:: 5.0
"""
if self.locationLabel is None:
return None
label = doc.getLocationLabel(self.locationLabel)
if label is None:
raise DesignSpaceDocumentError(
"InstanceDescriptor.getLocationLabelDescriptor(): "
f"unknown location label `{self.locationLabel}` in instance `{self.name}`."
)
return label
def getFullDesignLocation(
self, doc: "DesignSpaceDocument"
) -> AnisotropicLocationDict:
"""Get the complete design location of this instance, by combining data
from the various location fields, default axis values and mappings, and
top-level location labels.
The source of truth for this instance's location is determined for each
axis independently by taking the first not-None field in this list:
- ``locationLabel``: the location along this axis is the same as the
matching STAT format 4 label. No anisotropy.
- ``designLocation[axisName]``: the explicit design location along this
axis, possibly anisotropic.
- ``userLocation[axisName]``: the explicit user location along this
axis. No anisotropy.
- ``axis.default``: default axis value. No anisotropy.
.. versionadded:: 5.0
"""
label = self.getLocationLabelDescriptor(doc)
if label is not None:
return doc.map_forward(label.userLocation) # type: ignore
result: AnisotropicLocationDict = {}
for axis in doc.axes:
if axis.name in self.designLocation:
result[axis.name] = self.designLocation[axis.name]
elif axis.name in self.userLocation:
result[axis.name] = axis.map_forward(self.userLocation[axis.name])
else:
result[axis.name] = axis.map_forward(axis.default)
return result
def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
"""Get the complete user location for this instance.
.. seealso:: :meth:`getFullDesignLocation`
.. versionadded:: 5.0
"""
return doc.map_backward(self.getFullDesignLocation(doc))
def tagForAxisName(name):
# try to find or make a tag name for this axis name
names = {
"weight": ("wght", dict(en="Weight")),
"width": ("wdth", dict(en="Width")),
"optical": ("opsz", dict(en="Optical Size")),
"slant": ("slnt", dict(en="Slant")),
"italic": ("ital", dict(en="Italic")),
}
if name.lower() in names:
return names[name.lower()]
if len(name) < 4:
tag = name + "*" * (4 - len(name))
else:
tag = name[:4]
return tag, dict(en=name)
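# Hedged sketch of tagForAxisName's behaviour (hypothetical helper): registered
# names map to their OpenType tags, short unknown names are padded with "*", long
# ones are truncated to four characters.
def _demo_tag_for_axis_name():
    assert tagForAxisName("weight") == ("wght", {"en": "Weight"})
    assert tagForAxisName("Grade")[0] == "Grad"
    assert tagForAxisName("wd")[0] == "wd**"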
class AbstractAxisDescriptor(SimpleDescriptor):
flavor = "axis"
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
# opentype tag for this axis
self.tag = tag
"""string. Four letter tag for this axis. Some might be
registered at the `OpenType
specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__.
Privately-defined axis tags must begin with an uppercase letter and
use only uppercase letters or digits.
"""
# name of the axis used in locations
self.name = name
"""string. Name of the axis as it is used in the location dicts.
MutatorMath + varLib.
"""
# names for UI purposes, if this is not a standard axis,
self.labelNames = labelNames or {}
"""dict. When defining a non-registered axis, it will be
necessary to define user-facing readable names for the axis. Keyed by
xml:lang code. Values are required to be ``unicode`` strings, even if
they only contain ASCII characters.
"""
self.hidden = hidden
"""bool. Whether this axis should be hidden in user interfaces.
"""
self.map = map or []
"""list of input / output values that can describe a warp of user space
to design space coordinates. If no map values are present, it is assumed
user space is the same as design space, as in [(minimum, minimum),
(maximum, maximum)].
varLib.
"""
self.axisOrdering = axisOrdering
"""STAT table field ``axisOrdering``.
See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_
.. versionadded:: 5.0
"""
self.axisLabels: List[AxisLabelDescriptor] = axisLabels or []
"""STAT table entries for Axis Value Tables format 1, 2, 3.
See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_
.. versionadded:: 5.0
"""
class AxisDescriptor(AbstractAxisDescriptor):
"""Simple container for the axis data.
Add more localisations?
.. code:: python
a1 = AxisDescriptor()
a1.minimum = 1
a1.maximum = 1000
a1.default = 400
a1.name = "weight"
a1.tag = "wght"
a1.labelNames['fa-IR'] = "قطر"
a1.labelNames['en'] = "Wéíght"
a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
a1.axisOrdering = 1
a1.axisLabels = [
AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
]
doc.addAxis(a1)
"""
_attrs = [
"tag",
"name",
"maximum",
"minimum",
"default",
"map",
"axisOrdering",
"axisLabels",
]
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
minimum=None,
default=None,
maximum=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
super().__init__(
tag=tag,
name=name,
labelNames=labelNames,
hidden=hidden,
map=map,
axisOrdering=axisOrdering,
axisLabels=axisLabels,
)
self.minimum = minimum
"""number. The minimum value for this axis in user space.
MutatorMath + varLib.
"""
self.maximum = maximum
"""number. The maximum value for this axis in user space.
MutatorMath + varLib.
"""
self.default = default
"""number. The default value for this axis, i.e. when a new location is
created, this is the value this axis will get in user space.
MutatorMath + varLib.
"""
def serialize(self):
# output to a dict, used in testing
return dict(
tag=self.tag,
name=self.name,
labelNames=self.labelNames,
maximum=self.maximum,
minimum=self.minimum,
default=self.default,
hidden=self.hidden,
map=self.map,
axisOrdering=self.axisOrdering,
axisLabels=self.axisLabels,
)
def map_forward(self, v):
"""Maps value from axis mapping's input (user) to output (design)."""
from fontTools.varLib.models import piecewiseLinearMap
if not self.map:
return v
return piecewiseLinearMap(v, {k: v for k, v in self.map})
def map_backward(self, v):
"""Maps value from axis mapping's output (design) to input (user)."""
from fontTools.varLib.models import piecewiseLinearMap
if isinstance(v, tuple):
v = v[0]
if not self.map:
return v
return piecewiseLinearMap(v, {v: k for k, v in self.map})
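# Hedged usage sketch of the user<->design mapping (hypothetical helper); the values
# follow the AxisDescriptor docstring example, where user 400 maps to design 66.
def _demo_axis_mapping():
    a = AxisDescriptor(name="weight", tag="wght", minimum=1, default=400, maximum=1000)
    a.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
    assert a.map_forward(400) == 66.0
    assert a.map_backward(66.0) == 400.0
    return a.map_forward(700)  # piecewise-linear interpolation between 66 and 990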
class DiscreteAxisDescriptor(AbstractAxisDescriptor):
"""Container for discrete axis data.
Use this for axes that do not interpolate. The main difference from a
continuous axis is that a continuous axis has a ``minimum`` and ``maximum``,
while a discrete axis has a list of ``values``.
Example: an Italic axis with 2 stops, Roman and Italic, that are not
compatible. The axis still allows binding the full font family together,
which is useful for the STAT table; however, it can't become a variation
axis in a VF.
.. code:: python
a2 = DiscreteAxisDescriptor()
a2.values = [0, 1]
a2.default = 0
a2.name = "Italic"
a2.tag = "ITAL"
a2.labelNames['fr'] = "Italique"
a2.map = [(0, 0), (1, -11)]
a2.axisOrdering = 2
a2.axisLabels = [
AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
]
doc.addAxis(a2)
.. versionadded:: 5.0
"""
flavor = "axis"
_attrs = ("tag", "name", "values", "default", "map", "axisOrdering", "axisLabels")
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
values=None,
default=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
super().__init__(
tag=tag,
name=name,
labelNames=labelNames,
hidden=hidden,
map=map,
axisOrdering=axisOrdering,
axisLabels=axisLabels,
)
self.default: float = default
"""The default value for this axis, i.e. when a new location is
created, this is the value this axis will get in user space.
However, this default value is less important than in continuous axes:
- it doesn't define the "neutral" version of outlines from which
deltas would apply, as this axis does not interpolate.
- it doesn't provide the reference glyph set for the designspace, as
fonts at each value can have different glyph sets.
"""
self.values: List[float] = values or []
"""List of possible values for this axis. Contrary to continuous axes,
only the values in this list can be taken by the axis, nothing in-between.
"""
def map_forward(self, value):
"""Maps value from axis mapping's input to output.
Returns value unchanged if no mapping entry is found.
Note: for discrete axes, each value must have its mapping entry, if
you intend that value to be mapped.
"""
return next((v for k, v in self.map if k == value), value)
def map_backward(self, value):
"""Maps value from axis mapping's output to input.
Returns value unchanged if no mapping entry is found.
Note: for discrete axes, each value must have its mapping entry, if
you intend that value to be mapped.
"""
if isinstance(value, tuple):
value = value[0]
return next((k for k, v in self.map if v == value), value)
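# Hedged sketch of discrete-axis mapping (hypothetical helper): only exact map
# entries translate, everything else passes through unchanged; values follow the
# class docstring example.
def _demo_discrete_axis_mapping():
    a = DiscreteAxisDescriptor(
        name="Italic", tag="ITAL", values=[0, 1], default=0, map=[(0, 0), (1, -11)]
    )
    assert a.map_forward(1) == -11
    assert a.map_backward(-11) == 1
    assert a.map_forward(0.5) == 0.5  # no entry -> unchanged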
class AxisLabelDescriptor(SimpleDescriptor):
"""Container for axis label data.
Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
All values are user values.
See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_
The STAT format of the Axis value depends on which fields are filled in,
see :meth:`getFormat`
.. versionadded:: 5.0
"""
flavor = "label"
_attrs = (
"userMinimum",
"userValue",
"userMaximum",
"name",
"elidable",
"olderSibling",
"linkedUserValue",
"labelNames",
)
def __init__(
self,
*,
name,
userValue,
userMinimum=None,
userMaximum=None,
elidable=False,
olderSibling=False,
linkedUserValue=None,
labelNames=None,
):
self.userMinimum: Optional[float] = userMinimum
"""STAT field ``rangeMinValue`` (format 2)."""
self.userValue: float = userValue
"""STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2)."""
self.userMaximum: Optional[float] = userMaximum
"""STAT field ``rangeMaxValue`` (format 2)."""
self.name: str = name
"""Label for this axis location, STAT field ``valueNameID``."""
self.elidable: bool = elidable
"""STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.olderSibling: bool = olderSibling
"""STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.linkedUserValue: Optional[float] = linkedUserValue
"""STAT field ``linkedValue`` (format 3)."""
self.labelNames: MutableMapping[str, str] = labelNames or {}
"""User-facing translations of this location's label. Keyed by
``xml:lang`` code.
"""
def getFormat(self) -> int:
"""Determine which format of STAT Axis value to use to encode this label.
=========== ========= =========== =========== ===============
STAT Format userValue userMinimum userMaximum linkedUserValue
=========== ========= =========== =========== ===============
1 ✅ ❌ ❌ ❌
2 ✅ ✅ ✅ ❌
3 ✅ ❌ ❌ ✅
=========== ========= =========== =========== ===============
"""
if self.linkedUserValue is not None:
return 3
if self.userMinimum is not None or self.userMaximum is not None:
return 2
return 1
@property
def defaultName(self) -> str:
"""Return the English name from :attr:`labelNames` or the :attr:`name`."""
return self.labelNames.get("en") or self.name
class LocationLabelDescriptor(SimpleDescriptor):
"""Container for location label data.
Analogue of OpenType's STAT data for a free-floating location (format 4).
All values are user values.
See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_
.. versionadded:: 5.0
"""
flavor = "label"
_attrs = ("name", "elidable", "olderSibling", "userLocation", "labelNames")
def __init__(
self,
*,
name,
userLocation,
elidable=False,
olderSibling=False,
labelNames=None,
):
self.name: str = name
"""Label for this named location, STAT field ``valueNameID``."""
self.userLocation: SimpleLocationDict = userLocation or {}
"""Location in user coordinates along each axis.
If an axis is not mentioned, it is assumed to be at its default location.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullUserLocation`
"""
self.elidable: bool = elidable
"""STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.olderSibling: bool = olderSibling
"""STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.labelNames: Dict[str, str] = labelNames or {}
"""User-facing translations of this location's label. Keyed by
xml:lang code.
"""
@property
def defaultName(self) -> str:
"""Return the English name from :attr:`labelNames` or the :attr:`name`."""
return self.labelNames.get("en") or self.name
def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
"""Get the complete user location of this label, by combining data
from the explicit user location and default axis values.
.. versionadded:: 5.0
"""
return {
axis.name: self.userLocation.get(axis.name, axis.default)
for axis in doc.axes
}
class VariableFontDescriptor(SimpleDescriptor):
"""Container for variable fonts, sub-spaces of the Designspace.
Use-cases:
- From a single DesignSpace with discrete axes, define 1 variable font
per value on the discrete axes. Before version 5, you would have needed
1 DesignSpace per such variable font, and a lot of data duplication.
- From a big variable font with many axes, define subsets of that variable
font that only include some axes and freeze other axes at a given location.
.. versionadded:: 5.0
"""
flavor = "variable-font"
_attrs = ("filename", "axisSubsets", "lib")
filename = posixpath_property("_filename")
def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
self.name: str = name
"""string, required. Name of this variable to identify it during the
build process and from other parts of the document, and also as a
filename in case the filename property is empty.
VarLib.
"""
self.filename: str = filename
"""string, optional. Relative path to the variable font file, **as it is
in the document**. The file may or may not exist.
If not specified, the :attr:`name` will be used as a basename for the file.
"""
self.axisSubsets: List[
Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]
] = (axisSubsets or [])
"""Axis subsets to include in this variable font.
If an axis is not mentioned, assume that we only want the default
location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
"""
self.lib: MutableMapping[str, Any] = lib or {}
"""Custom data associated with this variable font."""
class RangeAxisSubsetDescriptor(SimpleDescriptor):
"""Subset of a continuous axis to include in a variable font.
.. versionadded:: 5.0
"""
flavor = "axis-subset"
_attrs = ("name", "userMinimum", "userDefault", "userMaximum")
def __init__(
self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf
):
self.name: str = name
"""Name of the :class:`AxisDescriptor` to subset."""
self.userMinimum: float = userMinimum
"""New minimum value of the axis in the target variable font.
If not specified, assume the same minimum value as the full axis.
(default = ``-math.inf``)
"""
self.userDefault: Optional[float] = userDefault
"""New default value of the axis in the target variable font.
If not specified, assume the same default value as the full axis.
(default = ``None``)
"""
self.userMaximum: float = userMaximum
"""New maximum value of the axis in the target variable font.
If not specified, assume the same maximum value as the full axis.
(default = ``math.inf``)
"""
class ValueAxisSubsetDescriptor(SimpleDescriptor):
"""Single value of a discrete or continuous axis to use in a variable font.
.. versionadded:: 5.0
"""
flavor = "axis-subset"
_attrs = ("name", "userValue")
def __init__(self, *, name, userValue):
self.name: str = name
"""Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
to "snapshot" or "freeze".
"""
self.userValue: float = userValue
"""Value in user coordinates at which to freeze the given axis."""
class BaseDocWriter(object):
_whiteSpace = " "
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
axisMappingDescriptorClass = AxisMappingDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
@classmethod
def getAxisDecriptor(cls):
return cls.axisDescriptorClass()
@classmethod
def getAxisMappingDescriptor(cls):
return cls.axisMappingDescriptorClass()
@classmethod
def getSourceDescriptor(cls):
return cls.sourceDescriptorClass()
@classmethod
def getInstanceDescriptor(cls):
return cls.instanceDescriptorClass()
@classmethod
def getRuleDescriptor(cls):
return cls.ruleDescriptorClass()
def __init__(self, documentPath, documentObject: DesignSpaceDocument):
self.path = documentPath
self.documentObject = documentObject
self.effectiveFormatTuple = self._getEffectiveFormatTuple()
self.root = ET.Element("designspace")
def write(self, pretty=True, encoding="UTF-8", xml_declaration=True):
self.root.attrib["format"] = ".".join(str(i) for i in self.effectiveFormatTuple)
if (
self.documentObject.axes
or self.documentObject.axisMappings
or self.documentObject.elidedFallbackName is not None
):
axesElement = ET.Element("axes")
if self.documentObject.elidedFallbackName is not None:
axesElement.attrib["elidedfallbackname"] = (
self.documentObject.elidedFallbackName
)
self.root.append(axesElement)
for axisObject in self.documentObject.axes:
self._addAxis(axisObject)
if self.documentObject.axisMappings:
mappingsElement = None
lastGroup = object()
for mappingObject in self.documentObject.axisMappings:
if getattr(mappingObject, "groupDescription", None) != lastGroup:
if mappingsElement is not None:
self.root.findall(".axes")[0].append(mappingsElement)
lastGroup = getattr(mappingObject, "groupDescription", None)
mappingsElement = ET.Element("mappings")
if lastGroup is not None:
mappingsElement.attrib["description"] = lastGroup
self._addAxisMapping(mappingsElement, mappingObject)
if mappingsElement is not None:
self.root.findall(".axes")[0].append(mappingsElement)
if self.documentObject.locationLabels:
labelsElement = ET.Element("labels")
for labelObject in self.documentObject.locationLabels:
self._addLocationLabel(labelsElement, labelObject)
self.root.append(labelsElement)
if self.documentObject.rules:
if getattr(self.documentObject, "rulesProcessingLast", False):
attributes = {"processing": "last"}
else:
attributes = {}
self.root.append(ET.Element("rules", attributes))
for ruleObject in self.documentObject.rules:
self._addRule(ruleObject)
if self.documentObject.sources:
self.root.append(ET.Element("sources"))
for sourceObject in self.documentObject.sources:
self._addSource(sourceObject)
if self.documentObject.variableFonts:
variableFontsElement = ET.Element("variable-fonts")
for variableFont in self.documentObject.variableFonts:
self._addVariableFont(variableFontsElement, variableFont)
self.root.append(variableFontsElement)
if self.documentObject.instances:
self.root.append(ET.Element("instances"))
for instanceObject in self.documentObject.instances:
self._addInstance(instanceObject)
if self.documentObject.lib:
self._addLib(self.root, self.documentObject.lib, 2)
tree = ET.ElementTree(self.root)
tree.write(
self.path,
encoding=encoding,
method="xml",
xml_declaration=xml_declaration,
pretty_print=pretty,
)
def _getEffectiveFormatTuple(self):
"""Try to use the version specified in the document, or a sufficiently
recent version to be able to encode what the document contains.
"""
minVersion = self.documentObject.formatTuple
if (
any(
hasattr(axis, "values")
or axis.axisOrdering is not None
or axis.axisLabels
for axis in self.documentObject.axes
)
or self.documentObject.locationLabels
or any(source.localisedFamilyName for source in self.documentObject.sources)
or self.documentObject.variableFonts
or any(
instance.locationLabel or instance.userLocation
for instance in self.documentObject.instances
)
):
if minVersion < (5, 0):
minVersion = (5, 0)
if self.documentObject.axisMappings:
if minVersion < (5, 1):
minVersion = (5, 1)
return minVersion
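# For example, a document that declares format "4.1" but contains axis labels,
# location labels or a <variable-fonts> section is written out as format "5.0",
# and one that also uses axis <mappings> is bumped to "5.1".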
def _makeLocationElement(self, locationObject, name=None):
"""Convert Location dict to a locationElement."""
locElement = ET.Element("location")
if name is not None:
locElement.attrib["name"] = name
validatedLocation = self.documentObject.newDefaultLocation()
for axisName, axisValue in locationObject.items():
if axisName in validatedLocation:
# only accept values we know
validatedLocation[axisName] = axisValue
for dimensionName, dimensionValue in validatedLocation.items():
dimElement = ET.Element("dimension")
dimElement.attrib["name"] = dimensionName
if type(dimensionValue) == tuple:
dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue[0])
dimElement.attrib["yvalue"] = self.intOrFloat(dimensionValue[1])
else:
dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue)
locElement.append(dimElement)
return locElement, validatedLocation
def intOrFloat(self, num):
if int(num) == num:
return "%d" % num
return ("%f" % num).rstrip("0").rstrip(".")
def _addRule(self, ruleObject):
# if none of the conditions have minimum or maximum values, do not add the rule.
ruleElement = ET.Element("rule")
if ruleObject.name is not None:
ruleElement.attrib["name"] = ruleObject.name
for conditions in ruleObject.conditionSets:
conditionsetElement = ET.Element("conditionset")
for cond in conditions:
if cond.get("minimum") is None and cond.get("maximum") is None:
# neither is defined, don't add this condition
continue
conditionElement = ET.Element("condition")
conditionElement.attrib["name"] = cond.get("name")
if cond.get("minimum") is not None:
conditionElement.attrib["minimum"] = self.intOrFloat(
cond.get("minimum")
)
if cond.get("maximum") is not None:
conditionElement.attrib["maximum"] = self.intOrFloat(
cond.get("maximum")
)
conditionsetElement.append(conditionElement)
if len(conditionsetElement):
ruleElement.append(conditionsetElement)
for sub in ruleObject.subs:
subElement = ET.Element("sub")
subElement.attrib["name"] = sub[0]
subElement.attrib["with"] = sub[1]
ruleElement.append(subElement)
if len(ruleElement):
self.root.findall(".rules")[0].append(ruleElement)
def _addAxis(self, axisObject):
axisElement = ET.Element("axis")
axisElement.attrib["tag"] = axisObject.tag
axisElement.attrib["name"] = axisObject.name
self._addLabelNames(axisElement, axisObject.labelNames)
if axisObject.map:
for inputValue, outputValue in axisObject.map:
mapElement = ET.Element("map")
mapElement.attrib["input"] = self.intOrFloat(inputValue)
mapElement.attrib["output"] = self.intOrFloat(outputValue)
axisElement.append(mapElement)
if axisObject.axisOrdering or axisObject.axisLabels:
labelsElement = ET.Element("labels")
if axisObject.axisOrdering is not None:
labelsElement.attrib["ordering"] = str(axisObject.axisOrdering)
for label in axisObject.axisLabels:
self._addAxisLabel(labelsElement, label)
axisElement.append(labelsElement)
if hasattr(axisObject, "minimum"):
axisElement.attrib["minimum"] = self.intOrFloat(axisObject.minimum)
axisElement.attrib["maximum"] = self.intOrFloat(axisObject.maximum)
elif hasattr(axisObject, "values"):
axisElement.attrib["values"] = " ".join(
self.intOrFloat(v) for v in axisObject.values
)
axisElement.attrib["default"] = self.intOrFloat(axisObject.default)
if axisObject.hidden:
axisElement.attrib["hidden"] = "1"
self.root.findall(".axes")[0].append(axisElement)
def _addAxisMapping(self, mappingsElement, mappingObject):
mappingElement = ET.Element("mapping")
if getattr(mappingObject, "description", None) is not None:
mappingElement.attrib["description"] = mappingObject.description
for what in ("inputLocation", "outputLocation"):
whatObject = getattr(mappingObject, what, None)
if whatObject is None:
continue
whatElement = ET.Element(what[:-8])
mappingElement.append(whatElement)
for name, value in whatObject.items():
dimensionElement = ET.Element("dimension")
dimensionElement.attrib["name"] = name
dimensionElement.attrib["xvalue"] = self.intOrFloat(value)
whatElement.append(dimensionElement)
mappingsElement.append(mappingElement)
def _addAxisLabel(
self, axisElement: ET.Element, label: AxisLabelDescriptor
) -> None:
labelElement = ET.Element("label")
labelElement.attrib["uservalue"] = self.intOrFloat(label.userValue)
if label.userMinimum is not None:
labelElement.attrib["userminimum"] = self.intOrFloat(label.userMinimum)
if label.userMaximum is not None:
labelElement.attrib["usermaximum"] = self.intOrFloat(label.userMaximum)
labelElement.attrib["name"] = label.name
if label.elidable:
labelElement.attrib["elidable"] = "true"
if label.olderSibling:
labelElement.attrib["oldersibling"] = "true"
if label.linkedUserValue is not None:
labelElement.attrib["linkeduservalue"] = self.intOrFloat(
label.linkedUserValue
)
self._addLabelNames(labelElement, label.labelNames)
axisElement.append(labelElement)
def _addLabelNames(self, parentElement, labelNames):
for languageCode, labelName in sorted(labelNames.items()):
languageElement = ET.Element("labelname")
languageElement.attrib[XML_LANG] = languageCode
languageElement.text = labelName
parentElement.append(languageElement)
def _addLocationLabel(
self, parentElement: ET.Element, label: LocationLabelDescriptor
) -> None:
labelElement = ET.Element("label")
labelElement.attrib["name"] = label.name
if label.elidable:
labelElement.attrib["elidable"] = "true"
if label.olderSibling:
labelElement.attrib["oldersibling"] = "true"
self._addLabelNames(labelElement, label.labelNames)
self._addLocationElement(labelElement, userLocation=label.userLocation)
parentElement.append(labelElement)
def _addLocationElement(
self,
parentElement,
*,
designLocation: AnisotropicLocationDict = None,
userLocation: SimpleLocationDict = None,
):
locElement = ET.Element("location")
for axis in self.documentObject.axes:
if designLocation is not None and axis.name in designLocation:
dimElement = ET.Element("dimension")
dimElement.attrib["name"] = axis.name
value = designLocation[axis.name]
if isinstance(value, tuple):
dimElement.attrib["xvalue"] = self.intOrFloat(value[0])
dimElement.attrib["yvalue"] = self.intOrFloat(value[1])
else:
dimElement.attrib["xvalue"] = self.intOrFloat(value)
locElement.append(dimElement)
elif userLocation is not None and axis.name in userLocation:
dimElement = ET.Element("dimension")
dimElement.attrib["name"] = axis.name
value = userLocation[axis.name]
dimElement.attrib["uservalue"] = self.intOrFloat(value)
locElement.append(dimElement)
if len(locElement) > 0:
parentElement.append(locElement)
def _addInstance(self, instanceObject):
instanceElement = ET.Element("instance")
if instanceObject.name is not None:
instanceElement.attrib["name"] = instanceObject.name
if instanceObject.locationLabel is not None:
instanceElement.attrib["location"] = instanceObject.locationLabel
if instanceObject.familyName is not None:
instanceElement.attrib["familyname"] = instanceObject.familyName
if instanceObject.styleName is not None:
instanceElement.attrib["stylename"] = instanceObject.styleName
# add localisations
if instanceObject.localisedStyleName:
languageCodes = list(instanceObject.localisedStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedStyleNameElement = ET.Element("stylename")
localisedStyleNameElement.attrib[XML_LANG] = code
localisedStyleNameElement.text = instanceObject.getStyleName(code)
instanceElement.append(localisedStyleNameElement)
if instanceObject.localisedFamilyName:
languageCodes = list(instanceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element("familyname")
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = instanceObject.getFamilyName(code)
instanceElement.append(localisedFamilyNameElement)
if instanceObject.localisedStyleMapStyleName:
languageCodes = list(instanceObject.localisedStyleMapStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapStyleNameElement = ET.Element("stylemapstylename")
localisedStyleMapStyleNameElement.attrib[XML_LANG] = code
localisedStyleMapStyleNameElement.text = (
instanceObject.getStyleMapStyleName(code)
)
instanceElement.append(localisedStyleMapStyleNameElement)
if instanceObject.localisedStyleMapFamilyName:
languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapFamilyNameElement = ET.Element("stylemapfamilyname")
localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code
localisedStyleMapFamilyNameElement.text = (
instanceObject.getStyleMapFamilyName(code)
)
instanceElement.append(localisedStyleMapFamilyNameElement)
if self.effectiveFormatTuple >= (5, 0):
if instanceObject.locationLabel is None:
self._addLocationElement(
instanceElement,
designLocation=instanceObject.designLocation,
userLocation=instanceObject.userLocation,
)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
if instanceObject.location is not None:
locationElement, instanceObject.location = self._makeLocationElement(
instanceObject.location
)
instanceElement.append(locationElement)
if instanceObject.filename is not None:
instanceElement.attrib["filename"] = instanceObject.filename
if instanceObject.postScriptFontName is not None:
instanceElement.attrib["postscriptfontname"] = (
instanceObject.postScriptFontName
)
if instanceObject.styleMapFamilyName is not None:
instanceElement.attrib["stylemapfamilyname"] = (
instanceObject.styleMapFamilyName
)
if instanceObject.styleMapStyleName is not None:
instanceElement.attrib["stylemapstylename"] = (
instanceObject.styleMapStyleName
)
if self.effectiveFormatTuple < (5, 0):
# Deprecated members as of version 5.0
if instanceObject.glyphs:
if instanceElement.findall(".glyphs") == []:
glyphsElement = ET.Element("glyphs")
instanceElement.append(glyphsElement)
glyphsElement = instanceElement.findall(".glyphs")[0]
for glyphName, data in sorted(instanceObject.glyphs.items()):
glyphElement = self._writeGlyphElement(
instanceElement, instanceObject, glyphName, data
)
glyphsElement.append(glyphElement)
if instanceObject.kerning:
kerningElement = ET.Element("kerning")
instanceElement.append(kerningElement)
if instanceObject.info:
infoElement = ET.Element("info")
instanceElement.append(infoElement)
self._addLib(instanceElement, instanceObject.lib, 4)
self.root.findall(".instances")[0].append(instanceElement)
def _addSource(self, sourceObject):
sourceElement = ET.Element("source")
if sourceObject.filename is not None:
sourceElement.attrib["filename"] = sourceObject.filename
if sourceObject.name is not None:
if sourceObject.name.find("temp_master") != 0:
# do not save temporary source names
sourceElement.attrib["name"] = sourceObject.name
if sourceObject.familyName is not None:
sourceElement.attrib["familyname"] = sourceObject.familyName
if sourceObject.styleName is not None:
sourceElement.attrib["stylename"] = sourceObject.styleName
if sourceObject.layerName is not None:
sourceElement.attrib["layer"] = sourceObject.layerName
if sourceObject.localisedFamilyName:
languageCodes = list(sourceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element("familyname")
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = sourceObject.getFamilyName(code)
sourceElement.append(localisedFamilyNameElement)
if sourceObject.copyLib:
libElement = ET.Element("lib")
libElement.attrib["copy"] = "1"
sourceElement.append(libElement)
if sourceObject.copyGroups:
groupsElement = ET.Element("groups")
groupsElement.attrib["copy"] = "1"
sourceElement.append(groupsElement)
if sourceObject.copyFeatures:
featuresElement = ET.Element("features")
featuresElement.attrib["copy"] = "1"
sourceElement.append(featuresElement)
if sourceObject.copyInfo or sourceObject.muteInfo:
infoElement = ET.Element("info")
if sourceObject.copyInfo:
infoElement.attrib["copy"] = "1"
if sourceObject.muteInfo:
infoElement.attrib["mute"] = "1"
sourceElement.append(infoElement)
if sourceObject.muteKerning:
kerningElement = ET.Element("kerning")
kerningElement.attrib["mute"] = "1"
sourceElement.append(kerningElement)
if sourceObject.mutedGlyphNames:
for name in sourceObject.mutedGlyphNames:
glyphElement = ET.Element("glyph")
glyphElement.attrib["name"] = name
glyphElement.attrib["mute"] = "1"
sourceElement.append(glyphElement)
if self.effectiveFormatTuple >= (5, 0):
self._addLocationElement(
sourceElement, designLocation=sourceObject.location
)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
locationElement, sourceObject.location = self._makeLocationElement(
sourceObject.location
)
sourceElement.append(locationElement)
self.root.findall(".sources")[0].append(sourceElement)
def _addVariableFont(
self, parentElement: ET.Element, vf: VariableFontDescriptor
) -> None:
vfElement = ET.Element("variable-font")
vfElement.attrib["name"] = vf.name
if vf.filename is not None:
vfElement.attrib["filename"] = vf.filename
if vf.axisSubsets:
subsetsElement = ET.Element("axis-subsets")
for subset in vf.axisSubsets:
subsetElement = ET.Element("axis-subset")
subsetElement.attrib["name"] = subset.name
# Mypy doesn't support narrowing union types via hasattr()
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
# TODO(Python 3.10): use TypeGuard
if hasattr(subset, "userMinimum"):
subset = cast(RangeAxisSubsetDescriptor, subset)
if subset.userMinimum != -math.inf:
subsetElement.attrib["userminimum"] = self.intOrFloat(
subset.userMinimum
)
if subset.userMaximum != math.inf:
subsetElement.attrib["usermaximum"] = self.intOrFloat(
subset.userMaximum
)
if subset.userDefault is not None:
subsetElement.attrib["userdefault"] = self.intOrFloat(
subset.userDefault
)
elif hasattr(subset, "userValue"):
subset = cast(ValueAxisSubsetDescriptor, subset)
subsetElement.attrib["uservalue"] = self.intOrFloat(
subset.userValue
)
subsetsElement.append(subsetElement)
vfElement.append(subsetsElement)
self._addLib(vfElement, vf.lib, 4)
parentElement.append(vfElement)
def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
if not data:
return
libElement = ET.Element("lib")
libElement.append(plistlib.totree(data, indent_level=indent_level))
parentElement.append(libElement)
def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data):
glyphElement = ET.Element("glyph")
if data.get("mute"):
glyphElement.attrib["mute"] = "1"
if data.get("unicodes") is not None:
glyphElement.attrib["unicode"] = " ".join(
[hex(u) for u in data.get("unicodes")]
)
if data.get("instanceLocation") is not None:
locationElement, data["instanceLocation"] = self._makeLocationElement(
data.get("instanceLocation")
)
glyphElement.append(locationElement)
if glyphName is not None:
glyphElement.attrib["name"] = glyphName
if data.get("note") is not None:
noteElement = ET.Element("note")
noteElement.text = data.get("note")
glyphElement.append(noteElement)
if data.get("masters") is not None:
mastersElement = ET.Element("masters")
for m in data.get("masters"):
masterElement = ET.Element("master")
if m.get("glyphName") is not None:
masterElement.attrib["glyphname"] = m.get("glyphName")
if m.get("font") is not None:
masterElement.attrib["source"] = m.get("font")
if m.get("location") is not None:
locationElement, m["location"] = self._makeLocationElement(
m.get("location")
)
masterElement.append(locationElement)
mastersElement.append(masterElement)
glyphElement.append(mastersElement)
return glyphElement
class BaseDocReader(LogMixin):
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
axisMappingDescriptorClass = AxisMappingDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontsDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
def __init__(self, documentPath, documentObject):
self.path = documentPath
self.documentObject = documentObject
tree = ET.parse(self.path)
self.root = tree.getroot()
self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
self._axes = []
self.rules = []
self.sources = []
self.instances = []
self.axisDefaults = {}
self._strictAxisNames = True
@classmethod
def fromstring(cls, string, documentObject):
f = BytesIO(tobytes(string, encoding="utf-8"))
self = cls(f, documentObject)
self.path = None
return self
def read(self):
self.readAxes()
self.readLabels()
self.readRules()
self.readVariableFonts()
self.readSources()
self.readInstances()
self.readLib()
def readRules(self):
# we also need to read any conditions that are outside of a condition set.
rules = []
rulesElement = self.root.find(".rules")
if rulesElement is not None:
processingValue = rulesElement.attrib.get("processing", "first")
if processingValue not in {"first", "last"}:
raise DesignSpaceDocumentError(
"<rules> processing attribute value is not valid: %r, "
"expected 'first' or 'last'" % processingValue
)
self.documentObject.rulesProcessingLast = processingValue == "last"
for ruleElement in self.root.findall(".rules/rule"):
ruleObject = self.ruleDescriptorClass()
ruleName = ruleObject.name = ruleElement.attrib.get("name")
# read any stray conditions outside a condition set
externalConditions = self._readConditionElements(
ruleElement,
ruleName,
)
if externalConditions:
ruleObject.conditionSets.append(externalConditions)
self.log.info(
"Found stray rule conditions outside a conditionset. "
"Wrapped them in a new conditionset."
)
# read the conditionsets
for conditionSetElement in ruleElement.findall(".conditionset"):
conditionSet = self._readConditionElements(
conditionSetElement,
ruleName,
)
if conditionSet is not None:
ruleObject.conditionSets.append(conditionSet)
for subElement in ruleElement.findall(".sub"):
a = subElement.attrib["name"]
b = subElement.attrib["with"]
ruleObject.subs.append((a, b))
rules.append(ruleObject)
self.documentObject.rules = rules
def _readConditionElements(self, parentElement, ruleName=None):
cds = []
for conditionElement in parentElement.findall(".condition"):
cd = {}
cdMin = conditionElement.attrib.get("minimum")
if cdMin is not None:
cd["minimum"] = float(cdMin)
else:
# will allow these to be None, assume axis.minimum
cd["minimum"] = None
cdMax = conditionElement.attrib.get("maximum")
if cdMax is not None:
cd["maximum"] = float(cdMax)
else:
# will allow these to be None, assume axis.maximum
cd["maximum"] = None
cd["name"] = conditionElement.attrib.get("name")
# a condition must specify at least one of minimum or maximum
if cd.get("minimum") is None and cd.get("maximum") is None:
raise DesignSpaceDocumentError(
"condition missing required minimum or maximum in rule"
+ (" '%s'" % ruleName if ruleName is not None else "")
)
cds.append(cd)
return cds
def readAxes(self):
# read the axes elements, including the warp map.
axesElement = self.root.find(".axes")
if axesElement is not None and "elidedfallbackname" in axesElement.attrib:
self.documentObject.elidedFallbackName = axesElement.attrib[
"elidedfallbackname"
]
axisElements = self.root.findall(".axes/axis")
if not axisElements:
return
for axisElement in axisElements:
if (
self.documentObject.formatTuple >= (5, 0)
and "values" in axisElement.attrib
):
axisObject = self.discreteAxisDescriptorClass()
axisObject.values = [
float(s) for s in axisElement.attrib["values"].split(" ")
]
else:
axisObject = self.axisDescriptorClass()
axisObject.minimum = float(axisElement.attrib.get("minimum"))
axisObject.maximum = float(axisElement.attrib.get("maximum"))
axisObject.default = float(axisElement.attrib.get("default"))
axisObject.name = axisElement.attrib.get("name")
if axisElement.attrib.get("hidden", False):
axisObject.hidden = True
axisObject.tag = axisElement.attrib.get("tag")
for mapElement in axisElement.findall("map"):
a = float(mapElement.attrib["input"])
b = float(mapElement.attrib["output"])
axisObject.map.append((a, b))
for labelNameElement in axisElement.findall("labelname"):
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
for key, lang in labelNameElement.items():
if key == XML_LANG:
axisObject.labelNames[lang] = tostr(labelNameElement.text)
labelElement = axisElement.find(".labels")
if labelElement is not None:
if "ordering" in labelElement.attrib:
axisObject.axisOrdering = int(labelElement.attrib["ordering"])
for label in labelElement.findall(".label"):
axisObject.axisLabels.append(self.readAxisLabel(label))
self.documentObject.axes.append(axisObject)
self.axisDefaults[axisObject.name] = axisObject.default
self.documentObject.axisMappings = []
for mappingsElement in self.root.findall(".axes/mappings"):
groupDescription = mappingsElement.attrib.get("description")
for mappingElement in mappingsElement.findall("mapping"):
description = mappingElement.attrib.get("description")
inputElement = mappingElement.find("input")
outputElement = mappingElement.find("output")
inputLoc = {}
outputLoc = {}
for dimElement in inputElement.findall(".dimension"):
name = dimElement.attrib["name"]
value = float(dimElement.attrib["xvalue"])
inputLoc[name] = value
for dimElement in outputElement.findall(".dimension"):
name = dimElement.attrib["name"]
value = float(dimElement.attrib["xvalue"])
outputLoc[name] = value
axisMappingObject = self.axisMappingDescriptorClass(
inputLocation=inputLoc,
outputLocation=outputLoc,
description=description,
groupDescription=groupDescription,
)
self.documentObject.axisMappings.append(axisMappingObject)
def readAxisLabel(self, element: ET.Element):
xml_attrs = {
"userminimum",
"uservalue",
"usermaximum",
"name",
"elidable",
"oldersibling",
"linkeduservalue",
}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(
f"label element contains unknown attributes: {', '.join(unknown_attrs)}"
)
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
valueStr = element.get("uservalue")
if valueStr is None:
raise DesignSpaceDocumentError(
"label element must have a uservalue attribute."
)
value = float(valueStr)
minimumStr = element.get("userminimum")
minimum = float(minimumStr) if minimumStr is not None else None
maximumStr = element.get("usermaximum")
maximum = float(maximumStr) if maximumStr is not None else None
linkedValueStr = element.get("linkeduservalue")
linkedValue = float(linkedValueStr) if linkedValueStr is not None else None
elidable = True if element.get("elidable") == "true" else False
olderSibling = True if element.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in element.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
return self.axisLabelDescriptorClass(
name=name,
userValue=value,
userMinimum=minimum,
userMaximum=maximum,
elidable=elidable,
olderSibling=olderSibling,
linkedUserValue=linkedValue,
labelNames=labelNames,
)
def readLabels(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {"name", "elidable", "oldersibling"}
for labelElement in self.root.findall(".labels/label"):
unknown_attrs = set(labelElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(
f"Label element contains unknown attributes: {', '.join(unknown_attrs)}"
)
name = labelElement.get("name")
if name is None:
raise DesignSpaceDocumentError(
"label element must have a name attribute."
)
designLocation, userLocation = self.locationFromElement(labelElement)
if designLocation:
raise DesignSpaceDocumentError(
f'<label> element "{name}" must only have user locations (using uservalue="").'
)
elidable = True if labelElement.get("elidable") == "true" else False
olderSibling = True if labelElement.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in labelElement.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
locationLabel = self.locationLabelDescriptorClass(
name=name,
userLocation=userLocation,
elidable=elidable,
olderSibling=olderSibling,
labelNames=labelNames,
)
self.documentObject.locationLabels.append(locationLabel)
def readVariableFonts(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {"name", "filename"}
for variableFontElement in self.root.findall(".variable-fonts/variable-font"):
unknown_attrs = set(variableFontElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(
f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}"
)
name = variableFontElement.get("name")
if name is None:
raise DesignSpaceDocumentError(
"variable-font element must have a name attribute."
)
filename = variableFontElement.get("filename")
axisSubsetsElement = variableFontElement.find(".axis-subsets")
if axisSubsetsElement is None:
raise DesignSpaceDocumentError(
"variable-font element must contain an axis-subsets element."
)
axisSubsets = []
for axisSubset in axisSubsetsElement.iterfind(".axis-subset"):
axisSubsets.append(self.readAxisSubset(axisSubset))
lib = None
libElement = variableFontElement.find(".lib")
if libElement is not None:
lib = plistlib.fromtree(libElement[0])
variableFont = self.variableFontsDescriptorClass(
name=name,
filename=filename,
axisSubsets=axisSubsets,
lib=lib,
)
self.documentObject.variableFonts.append(variableFont)
def readAxisSubset(self, element: ET.Element):
if "uservalue" in element.attrib:
xml_attrs = {"name", "uservalue"}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(
f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}"
)
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError(
"axis-subset element must have a name attribute."
)
userValueStr = element.get("uservalue")
if userValueStr is None:
raise DesignSpaceDocumentError(
"The axis-subset element for a discrete subset must have a uservalue attribute."
)
userValue = float(userValueStr)
return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue)
else:
xml_attrs = {"name", "userminimum", "userdefault", "usermaximum"}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(
f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}"
)
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError(
"axis-subset element must have a name attribute."
)
userMinimum = element.get("userminimum")
userDefault = element.get("userdefault")
userMaximum = element.get("usermaximum")
if (
userMinimum is not None
and userDefault is not None
and userMaximum is not None
):
return self.rangeAxisSubsetDescriptorClass(
name=name,
userMinimum=float(userMinimum),
userDefault=float(userDefault),
userMaximum=float(userMaximum),
)
if all(v is None for v in (userMinimum, userDefault, userMaximum)):
return self.rangeAxisSubsetDescriptorClass(name=name)
raise DesignSpaceDocumentError(
"axis-subset element must have min/max/default values or none at all."
)
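# The two supported element shapes (illustrative values only):
#   <axis-subset name="Weight" userminimum="400" userdefault="400" usermaximum="700"/>
#   <axis-subset name="Italic" uservalue="1"/>
# The first yields a RangeAxisSubsetDescriptor, the second a
# ValueAxisSubsetDescriptor; a bare <axis-subset name="Weight"/> keeps the full range.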
def readSources(self):
for sourceCount, sourceElement in enumerate(
self.root.findall(".sources/source")
):
filename = sourceElement.attrib.get("filename")
if filename is not None and self.path is not None:
sourcePath = os.path.abspath(
os.path.join(os.path.dirname(self.path), filename)
)
else:
sourcePath = None
sourceName = sourceElement.attrib.get("name")
if sourceName is None:
# add a temporary source name
sourceName = "temp_master.%d" % (sourceCount)
sourceObject = self.sourceDescriptorClass()
sourceObject.path = sourcePath # absolute path to the ufo source
sourceObject.filename = filename # path as it is stored in the document
sourceObject.name = sourceName
familyName = sourceElement.attrib.get("familyname")
if familyName is not None:
sourceObject.familyName = familyName
styleName = sourceElement.attrib.get("stylename")
if styleName is not None:
sourceObject.styleName = styleName
for familyNameElement in sourceElement.findall("familyname"):
for key, lang in familyNameElement.items():
if key == XML_LANG:
familyName = familyNameElement.text
sourceObject.setFamilyName(familyName, lang)
designLocation, userLocation = self.locationFromElement(sourceElement)
if userLocation:
raise DesignSpaceDocumentError(
f'<source> element "{sourceName}" must only have design locations (using xvalue="").'
)
sourceObject.location = designLocation
layerName = sourceElement.attrib.get("layer")
if layerName is not None:
sourceObject.layerName = layerName
for libElement in sourceElement.findall(".lib"):
if libElement.attrib.get("copy") == "1":
sourceObject.copyLib = True
for groupsElement in sourceElement.findall(".groups"):
if groupsElement.attrib.get("copy") == "1":
sourceObject.copyGroups = True
for infoElement in sourceElement.findall(".info"):
if infoElement.attrib.get("copy") == "1":
sourceObject.copyInfo = True
if infoElement.attrib.get("mute") == "1":
sourceObject.muteInfo = True
for featuresElement in sourceElement.findall(".features"):
if featuresElement.attrib.get("copy") == "1":
sourceObject.copyFeatures = True
for glyphElement in sourceElement.findall(".glyph"):
glyphName = glyphElement.attrib.get("name")
if glyphName is None:
continue
if glyphElement.attrib.get("mute") == "1":
sourceObject.mutedGlyphNames.append(glyphName)
for kerningElement in sourceElement.findall(".kerning"):
if kerningElement.attrib.get("mute") == "1":
sourceObject.muteKerning = True
self.documentObject.sources.append(sourceObject)
def locationFromElement(self, element):
"""Read a nested ``<location>`` element inside the given ``element``.
.. versionchanged:: 5.0
Return a tuple of (designLocation, userLocation)
"""
elementLocation = (None, None)
for locationElement in element.findall(".location"):
elementLocation = self.readLocationElement(locationElement)
break
return elementLocation
def readLocationElement(self, locationElement):
"""Read a ``<location>`` element.
.. versionchanged:: 5.0
Return a tuple of (designLocation, userLocation)
"""
if self._strictAxisNames and not self.documentObject.axes:
raise DesignSpaceDocumentError("No axes defined")
userLoc = {}
designLoc = {}
for dimensionElement in locationElement.findall(".dimension"):
dimName = dimensionElement.attrib.get("name")
if self._strictAxisNames and dimName not in self.axisDefaults:
# the location refers to an axis that is not defined in this document; warn and skip it
self.log.warning('Location with undefined axis: "%s".', dimName)
continue
userValue = xValue = yValue = None
try:
userValue = dimensionElement.attrib.get("uservalue")
if userValue is not None:
userValue = float(userValue)
except ValueError:
self.log.warning(
"ValueError in readLocation userValue %3.3f", userValue
)
try:
xValue = dimensionElement.attrib.get("xvalue")
if xValue is not None:
xValue = float(xValue)
except ValueError:
self.log.warning("ValueError in readLocation xValue %3.3f", xValue)
try:
yValue = dimensionElement.attrib.get("yvalue")
if yValue is not None:
yValue = float(yValue)
except ValueError:
self.log.warning("ValueError in readLocation yValue %3.3f", yValue)
if (userValue is None) == (xValue is None):
raise DesignSpaceDocumentError(
f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"'
)
if yValue is not None:
if xValue is None:
raise DesignSpaceDocumentError(
f'Missing xvalue="" for the location dimension "{dimName}"" with yvalue="{yValue}"'
)
designLoc[dimName] = (xValue, yValue)
elif xValue is not None:
designLoc[dimName] = xValue
else:
userLoc[dimName] = userValue
return designLoc, userLoc
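# How the three <dimension> forms map to the returned tuple (hypothetical axis
# names, for illustration only):
#   <dimension name="Weight" xvalue="350"/>             -> designLoc["Weight"] = 350.0
#   <dimension name="Width" xvalue="100" yvalue="120"/> -> designLoc["Width"] = (100.0, 120.0)
#   <dimension name="Italic" uservalue="1"/>            -> userLoc["Italic"] = 1.0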
def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
instanceElements = self.root.findall(".instances/instance")
for instanceElement in instanceElements:
self._readSingleInstanceElement(
instanceElement,
makeGlyphs=makeGlyphs,
makeKerning=makeKerning,
makeInfo=makeInfo,
)
def _readSingleInstanceElement(
self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True
):
filename = instanceElement.attrib.get("filename")
if filename is not None and self.documentObject.path is not None:
instancePath = os.path.join(
os.path.dirname(self.documentObject.path), filename
)
else:
instancePath = None
instanceObject = self.instanceDescriptorClass()
instanceObject.path = instancePath # absolute path to the instance
instanceObject.filename = filename # path as it is stored in the document
name = instanceElement.attrib.get("name")
if name is not None:
instanceObject.name = name
familyname = instanceElement.attrib.get("familyname")
if familyname is not None:
instanceObject.familyName = familyname
stylename = instanceElement.attrib.get("stylename")
if stylename is not None:
instanceObject.styleName = stylename
postScriptFontName = instanceElement.attrib.get("postscriptfontname")
if postScriptFontName is not None:
instanceObject.postScriptFontName = postScriptFontName
styleMapFamilyName = instanceElement.attrib.get("stylemapfamilyname")
if styleMapFamilyName is not None:
instanceObject.styleMapFamilyName = styleMapFamilyName
styleMapStyleName = instanceElement.attrib.get("stylemapstylename")
if styleMapStyleName is not None:
instanceObject.styleMapStyleName = styleMapStyleName
# read localised names
for styleNameElement in instanceElement.findall("stylename"):
for key, lang in styleNameElement.items():
if key == XML_LANG:
styleName = styleNameElement.text
instanceObject.setStyleName(styleName, lang)
for familyNameElement in instanceElement.findall("familyname"):
for key, lang in familyNameElement.items():
if key == XML_LANG:
familyName = familyNameElement.text
instanceObject.setFamilyName(familyName, lang)
for styleMapStyleNameElement in instanceElement.findall("stylemapstylename"):
for key, lang in styleMapStyleNameElement.items():
if key == XML_LANG:
styleMapStyleName = styleMapStyleNameElement.text
instanceObject.setStyleMapStyleName(styleMapStyleName, lang)
for styleMapFamilyNameElement in instanceElement.findall("stylemapfamilyname"):
for key, lang in styleMapFamilyNameElement.items():
if key == XML_LANG:
styleMapFamilyName = styleMapFamilyNameElement.text
instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang)
designLocation, userLocation = self.locationFromElement(instanceElement)
locationLabel = instanceElement.attrib.get("location")
if (designLocation or userLocation) and locationLabel is not None:
raise DesignSpaceDocumentError(
'instance element must have at most one of the location="..." attribute or the nested location element'
)
instanceObject.locationLabel = locationLabel
instanceObject.userLocation = userLocation or {}
instanceObject.designLocation = designLocation or {}
for glyphElement in instanceElement.findall(".glyphs/glyph"):
self.readGlyphElement(glyphElement, instanceObject)
for infoElement in instanceElement.findall("info"):
self.readInfoElement(infoElement, instanceObject)
for libElement in instanceElement.findall("lib"):
self.readLibElement(libElement, instanceObject)
self.documentObject.instances.append(instanceObject)
def readLibElement(self, libElement, instanceObject):
"""Read the lib element for the given instance."""
instanceObject.lib = plistlib.fromtree(libElement[0])
def readInfoElement(self, infoElement, instanceObject):
"""Read the info element."""
instanceObject.info = True
def readGlyphElement(self, glyphElement, instanceObject):
"""
Read the glyph element, which could look like either one of these:
.. code-block:: xml
<glyph name="b" unicode="0x62"/>
<glyph name="b"/>
<glyph name="b">
<master location="location-token-bbb" source="master-token-aaa2"/>
<master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/>
<note>
This is an instance from an anisotropic interpolation.
</note>
</glyph>
"""
glyphData = {}
glyphName = glyphElement.attrib.get("name")
if glyphName is None:
raise DesignSpaceDocumentError("Glyph object without name attribute")
mute = glyphElement.attrib.get("mute")
if mute == "1":
glyphData["mute"] = True
# unicode
unicodes = glyphElement.attrib.get("unicode")
if unicodes is not None:
try:
unicodes = [int(u, 16) for u in unicodes.split(" ")]
glyphData["unicodes"] = unicodes
except ValueError:
raise DesignSpaceDocumentError(
"unicode values %s are not integers" % unicodes
)
for noteElement in glyphElement.findall(".note"):
glyphData["note"] = noteElement.text
break
designLocation, userLocation = self.locationFromElement(glyphElement)
if userLocation:
raise DesignSpaceDocumentError(
f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").'
)
if designLocation is not None:
glyphData["instanceLocation"] = designLocation
glyphSources = None
for masterElement in glyphElement.findall(".masters/master"):
fontSourceName = masterElement.attrib.get("source")
designLocation, userLocation = self.locationFromElement(masterElement)
if userLocation:
raise DesignSpaceDocumentError(
f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").'
)
masterGlyphName = masterElement.attrib.get("glyphname")
if masterGlyphName is None:
# if we don't read a glyphname, use the one we have
masterGlyphName = glyphName
d = dict(
font=fontSourceName, location=designLocation, glyphName=masterGlyphName
)
if glyphSources is None:
glyphSources = []
glyphSources.append(d)
if glyphSources is not None:
glyphData["masters"] = glyphSources
instanceObject.glyphs[glyphName] = glyphData
def readLib(self):
"""Read the lib element for the whole document."""
for libElement in self.root.findall(".lib"):
self.documentObject.lib = plistlib.fromtree(libElement[0])
class DesignSpaceDocument(LogMixin, AsDictMixin):
"""The DesignSpaceDocument object can read and write ``.designspace`` data.
It imports the axes, sources, variable fonts and instances into very basic
**descriptor** objects that store the data in attributes. Data is added to
the document by creating such descriptor objects, filling them with data
and then adding them to the document. This makes it easy to integrate this
object in different contexts.
The **DesignSpaceDocument** object can be subclassed to work with
different objects, as long as they have the same attributes. Reader and
Writer objects can be subclassed as well.
**Note:** Python attribute names are usually camelCased, the
corresponding `XML <document-xml-structure>`_ attributes are usually
all lowercase.
.. code:: python
from fontTools.designspaceLib import DesignSpaceDocument
doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
doc.formatVersion
doc.elidedFallbackName
doc.axes
doc.axisMappings
doc.locationLabels
doc.rules
doc.rulesProcessingLast
doc.sources
doc.variableFonts
doc.instances
doc.lib
"""
def __init__(self, readerClass=None, writerClass=None):
self.path = None
"""String, optional. When the document is read from the disk, this is
the full path that was given to :meth:`read` or :meth:`fromfile`.
"""
self.filename = None
"""String, optional. When the document is read from the disk, this is
its original file name, i.e. the last part of its path.
When the document is produced by a Python script and still only exists
in memory, the producing script can write here an indication of a
possible "good" filename, in case one wants to save the file somewhere.
"""
self.formatVersion: Optional[str] = None
"""Format version for this document, as a string. E.g. "4.0" """
self.elidedFallbackName: Optional[str] = None
"""STAT Style Attributes Header field ``elidedFallbackNameID``.
See: `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_
.. versionadded:: 5.0
"""
self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []
"""List of this document's axes."""
self.axisMappings: List[AxisMappingDescriptor] = []
"""List of this document's axis mappings."""
self.locationLabels: List[LocationLabelDescriptor] = []
"""List of this document's STAT format 4 labels.
.. versionadded:: 5.0"""
self.rules: List[RuleDescriptor] = []
"""List of this document's rules."""
self.rulesProcessingLast: bool = False
"""This flag indicates whether the substitution rules should be applied
before or after other glyph substitution features.
- False: before
- True: after.
Default is False. For new projects, you probably want True. See
the following issues for more information:
`fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__
`fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__
If you want to use a different feature altogether, e.g. ``calt``,
use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``
.. code:: xml
<lib>
<dict>
<key>com.github.fonttools.varLib.featureVarsFeatureTag</key>
<string>calt</string>
</dict>
</lib>
"""
self.sources: List[SourceDescriptor] = []
"""List of this document's sources."""
self.variableFonts: List[VariableFontDescriptor] = []
"""List of this document's variable fonts.
.. versionadded:: 5.0"""
self.instances: List[InstanceDescriptor] = []
"""List of this document's instances."""
self.lib: Dict = {}
"""User defined, custom data associated with the whole document.
Use reverse-DNS notation to identify your own data.
Respect the data stored by others.
"""
self.default: Optional[str] = None
"""Name of the default master.
This attribute is updated by the :meth:`findDefault` method.
"""
if readerClass is not None:
self.readerClass = readerClass
else:
self.readerClass = BaseDocReader
if writerClass is not None:
self.writerClass = writerClass
else:
self.writerClass = BaseDocWriter
@classmethod
def fromfile(cls, path, readerClass=None, writerClass=None):
"""Read a designspace file from ``path`` and return a new instance of
:class:`DesignSpaceDocument`.
"""
self = cls(readerClass=readerClass, writerClass=writerClass)
self.read(path)
return self
@classmethod
def fromstring(cls, string, readerClass=None, writerClass=None):
self = cls(readerClass=readerClass, writerClass=writerClass)
reader = self.readerClass.fromstring(string, self)
reader.read()
if self.sources:
self.findDefault()
return self
def tostring(self, encoding=None):
"""Returns the designspace as a string. Default encoding ``utf-8``."""
if encoding is str or (encoding is not None and encoding.lower() == "unicode"):
f = StringIO()
xml_declaration = False
elif encoding is None or encoding == "utf-8":
f = BytesIO()
encoding = "UTF-8"
xml_declaration = True
else:
raise ValueError("unsupported encoding: '%s'" % encoding)
writer = self.writerClass(f, self)
writer.write(encoding=encoding, xml_declaration=xml_declaration)
return f.getvalue()
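# Sketch: doc.tostring() returns UTF-8 bytes with an XML declaration, while
# doc.tostring("unicode") (or encoding=str) returns a str without one.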
def read(self, path):
"""Read a designspace file from ``path`` and populates the fields of
``self`` with the data.
"""
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
self.path = path
self.filename = os.path.basename(path)
reader = self.readerClass(path, self)
reader.read()
if self.sources:
self.findDefault()
def write(self, path):
"""Write this designspace to ``path``."""
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
self.path = path
self.filename = os.path.basename(path)
self.updatePaths()
writer = self.writerClass(path, self)
writer.write()
def _posixRelativePath(self, otherPath):
relative = os.path.relpath(otherPath, os.path.dirname(self.path))
return posix(relative)
def updatePaths(self):
"""
Right before we save we need to identify and respond to the following situations:
In each descriptor, we have to do the right thing for the filename attribute.
::
case 1.
descriptor.filename == None
descriptor.path == None
-- action:
write as is, descriptors will not have a filename attr.
useless, but no reason to interfere.
case 2.
descriptor.filename == "../something"
descriptor.path == None
-- action:
write as is. The filename attr should not be touched.
case 3.
descriptor.filename == None
descriptor.path == "~/absolute/path/there"
-- action:
calculate the relative path for filename.
We're not overwriting some other value for filename, it should be fine
case 4.
descriptor.filename == '../somewhere'
descriptor.path == "~/absolute/path/there"
-- action:
there is a conflict between the given filename, and the path.
So we know where the file is relative to the document.
We can't guess why they differ, so we treat path as authoritative and update filename accordingly.
"""
assert self.path is not None
for descriptor in self.sources + self.instances:
if descriptor.path is not None:
# case 3 and 4: filename gets updated and relativized
descriptor.filename = self._posixRelativePath(descriptor.path)
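# Example for cases 3 and 4 above (hypothetical paths): a source whose path is
# "/work/MyFont-Regular.ufo", saved next to "/work/MyFont.designspace", gets
# filename="MyFont-Regular.ufo"; descriptors without a path keep their filename.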
def addSource(self, sourceDescriptor: SourceDescriptor):
"""Add the given ``sourceDescriptor`` to ``doc.sources``."""
self.sources.append(sourceDescriptor)
def addSourceDescriptor(self, **kwargs):
"""Instantiate a new :class:`SourceDescriptor` using the given
``kwargs`` and add it to ``doc.sources``.
"""
source = self.writerClass.sourceDescriptorClass(**kwargs)
self.addSource(source)
return source
def addInstance(self, instanceDescriptor: InstanceDescriptor):
"""Add the given ``instanceDescriptor`` to :attr:`instances`."""
self.instances.append(instanceDescriptor)
def addInstanceDescriptor(self, **kwargs):
"""Instantiate a new :class:`InstanceDescriptor` using the given
``kwargs`` and add it to :attr:`instances`.
"""
instance = self.writerClass.instanceDescriptorClass(**kwargs)
self.addInstance(instance)
return instance
def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]):
"""Add the given ``axisDescriptor`` to :attr:`axes`."""
self.axes.append(axisDescriptor)
def addAxisDescriptor(self, **kwargs):
"""Instantiate a new :class:`AxisDescriptor` using the given
``kwargs`` and add it to :attr:`axes`.
The axis will be an instance of :class:`DiscreteAxisDescriptor` if
the ``kwargs`` provide ``values``, or an :class:`AxisDescriptor` otherwise.
"""
if "values" in kwargs:
axis = self.writerClass.discreteAxisDescriptorClass(**kwargs)
else:
axis = self.writerClass.axisDescriptorClass(**kwargs)
self.addAxis(axis)
return axis
def addAxisMapping(self, axisMappingDescriptor: AxisMappingDescriptor):
"""Add the given ``axisMappingDescriptor`` to :attr:`axisMappings`."""
self.axisMappings.append(axisMappingDescriptor)
def addAxisMappingDescriptor(self, **kwargs):
"""Instantiate a new :class:`AxisMappingDescriptor` using the given
``kwargs`` and add it to :attr:`axisMappings`.
"""
axisMapping = self.writerClass.axisMappingDescriptorClass(**kwargs)
self.addAxisMapping(axisMapping)
return axisMapping
def addRule(self, ruleDescriptor: RuleDescriptor):
"""Add the given ``ruleDescriptor`` to :attr:`rules`."""
self.rules.append(ruleDescriptor)
def addRuleDescriptor(self, **kwargs):
"""Instantiate a new :class:`RuleDescriptor` using the given
``kwargs`` and add it to :attr:`rules`.
"""
rule = self.writerClass.ruleDescriptorClass(**kwargs)
self.addRule(rule)
return rule
def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor):
"""Add the given ``variableFontDescriptor`` to :attr:`variableFonts`.
.. versionadded:: 5.0
"""
self.variableFonts.append(variableFontDescriptor)
def addVariableFontDescriptor(self, **kwargs):
"""Instantiate a new :class:`VariableFontDescriptor` using the given
``kwargs`` and add it to :attr:`variableFonts`.
.. versionadded:: 5.0
"""
variableFont = self.writerClass.variableFontDescriptorClass(**kwargs)
self.addVariableFont(variableFont)
return variableFont
def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor):
"""Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`.
.. versionadded:: 5.0
"""
self.locationLabels.append(locationLabelDescriptor)
def addLocationLabelDescriptor(self, **kwargs):
"""Instantiate a new :class:`LocationLabelDescriptor` using the given
``kwargs`` and add it to :attr:`locationLabels`.
.. versionadded:: 5.0
"""
locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs)
self.addLocationLabel(locationLabel)
return locationLabel
def newDefaultLocation(self):
"""Return a dict with the default location in design space coordinates."""
# Without OrderedDict, output XML would be non-deterministic.
# https://github.com/LettError/designSpaceDocument/issues/10
loc = collections.OrderedDict()
for axisDescriptor in self.axes:
loc[axisDescriptor.name] = axisDescriptor.map_forward(
axisDescriptor.default
)
return loc
def labelForUserLocation(
self, userLocation: SimpleLocationDict
) -> Optional[LocationLabelDescriptor]:
"""Return the :class:`LocationLabel` that matches the given
``userLocation``, or ``None`` if no such label exists.
.. versionadded:: 5.0
"""
return next(
(
label
for label in self.locationLabels
if label.userLocation == userLocation
),
None,
)
def updateFilenameFromPath(self, masters=True, instances=True, force=False):
"""Set a descriptor filename attr from the path and this document path.
If the filename attribute is not None: skip it.
"""
if masters:
for descriptor in self.sources:
if descriptor.filename is not None and not force:
continue
if self.path is not None:
descriptor.filename = self._posixRelativePath(descriptor.path)
if instances:
for descriptor in self.instances:
if descriptor.filename is not None and not force:
continue
if self.path is not None:
descriptor.filename = self._posixRelativePath(descriptor.path)
def newAxisDescriptor(self):
"""Ask the writer class to make us a new axisDescriptor."""
return self.writerClass.getAxisDecriptor()
def newSourceDescriptor(self):
"""Ask the writer class to make us a new sourceDescriptor."""
return self.writerClass.getSourceDescriptor()
def newInstanceDescriptor(self):
"""Ask the writer class to make us a new instanceDescriptor."""
return self.writerClass.getInstanceDescriptor()
def getAxisOrder(self):
"""Return a list of axis names, in the same order as defined in the document."""
names = []
for axisDescriptor in self.axes:
names.append(axisDescriptor.name)
return names
def getAxis(self, name: str) -> AxisDescriptor | DiscreteAxisDescriptor | None:
"""Return the axis with the given ``name``, or ``None`` if no such axis exists."""
return next((axis for axis in self.axes if axis.name == name), None)
def getAxisByTag(self, tag: str) -> AxisDescriptor | DiscreteAxisDescriptor | None:
"""Return the axis with the given ``tag``, or ``None`` if no such axis exists."""
return next((axis for axis in self.axes if axis.tag == tag), None)
def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]:
"""Return the top-level location label with the given ``name``, or
``None`` if no such label exists.
.. versionadded:: 5.0
"""
for label in self.locationLabels:
if label.name == name:
return label
return None
def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict:
"""Map a user location to a design location.
Assume that missing coordinates are at the default location for that axis.
Note: the output won't be anisotropic, only the xvalue is set.
.. versionadded:: 5.0
"""
return {
axis.name: axis.map_forward(userLocation.get(axis.name, axis.default))
for axis in self.axes
}
def map_backward(
self, designLocation: AnisotropicLocationDict
) -> SimpleLocationDict:
"""Map a design location to a user location.
Assume that missing coordinates are at the default location for that axis.
When the input has anisotropic locations, only the xvalue is used.
.. versionadded:: 5.0
"""
return {
axis.name: (
axis.map_backward(designLocation[axis.name])
if axis.name in designLocation
else axis.default
)
for axis in self.axes
}
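# Sketch with a hypothetical weight axis whose map is
# [(100, 40), (400, 80), (900, 160)] (user -> design):
#   doc.map_forward({"Weight": 400})  -> {"Weight": 80, ...}   (other axes at their default)
#   doc.map_backward({"Weight": 80})  -> {"Weight": 400, ...}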
def findDefault(self):
"""Set and return SourceDescriptor at the default location or None.
The default location is the set of all `default` values in user space
of all axes.
This function updates the document's :attr:`default` value.
.. versionchanged:: 5.0
The default source may omit some of the axis values; missing values are
assumed to be at the axis default.
See :meth:`SourceDescriptor.getFullDesignLocation()`
"""
self.default = None
# Convert the default location from user space to design space before comparing
# it against the SourceDescriptor locations (always in design space).
defaultDesignLocation = self.newDefaultLocation()
for sourceDescriptor in self.sources:
if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation:
self.default = sourceDescriptor
return sourceDescriptor
return None
def normalizeLocation(self, location):
"""Return a dict with normalized axis values."""
from fontTools.varLib.models import normalizeValue
new = {}
for axis in self.axes:
if axis.name not in location:
# this axis is not in the location; skip this dimension
continue
value = location[axis.name]
# 'anisotropic' location, take first coord only
if isinstance(value, tuple):
value = value[0]
triple = [
axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum)
]
new[axis.name] = normalizeValue(value, triple)
return new
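# Example (hypothetical axis without a map): for a weight axis with
# minimum=100, default=400, maximum=900,
#   doc.normalizeLocation({"Weight": 500}) -> {"Weight": 0.2}
#   doc.normalizeLocation({"Weight": 250}) -> {"Weight": -0.5}
# i.e. values are scaled into the -1..0..1 range around the default.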
def normalize(self):
"""
Normalise the geometry of this designspace:
- scale all the locations of all masters and instances into the -1 .. 0 .. 1 range.
- the axis data is needed to do the scaling, so the axes are normalised last.
"""
# masters
for item in self.sources:
item.location = self.normalizeLocation(item.location)
# instances
for item in self.instances:
# glyph masters for this instance
for _, glyphData in item.glyphs.items():
glyphData["instanceLocation"] = self.normalizeLocation(
glyphData["instanceLocation"]
)
for glyphMaster in glyphData["masters"]:
glyphMaster["location"] = self.normalizeLocation(
glyphMaster["location"]
)
item.location = self.normalizeLocation(item.location)
# the axes
for axis in self.axes:
# scale the map first
newMap = []
for inputValue, outputValue in axis.map:
newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(
axis.name
)
newMap.append((inputValue, newOutputValue))
if newMap:
axis.map = newMap
# finally the axis values
minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name)
maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name)
default = self.normalizeLocation({axis.name: axis.default}).get(axis.name)
# and set them in the axis.minimum
axis.minimum = minimum
axis.maximum = maximum
axis.default = default
# now the rules
for rule in self.rules:
newConditionSets = []
for conditions in rule.conditionSets:
newConditions = []
for cond in conditions:
if cond.get("minimum") is not None:
minimum = self.normalizeLocation(
{cond["name"]: cond["minimum"]}
).get(cond["name"])
else:
minimum = None
if cond.get("maximum") is not None:
maximum = self.normalizeLocation(
{cond["name"]: cond["maximum"]}
).get(cond["name"])
else:
maximum = None
newConditions.append(
dict(name=cond["name"], minimum=minimum, maximum=maximum)
)
newConditionSets.append(newConditions)
rule.conditionSets = newConditionSets
def loadSourceFonts(self, opener, **kwargs):
"""Ensure SourceDescriptor.font attributes are loaded, and return list of fonts.
Takes a callable which initializes a new font object (e.g. TTFont, or
defcon.Font, etc.) from the SourceDescriptor.path, and sets the
SourceDescriptor.font attribute.
If the font attribute is already set, the font is not loaded again.
Fonts with the same path are only loaded once and shared among SourceDescriptors.
For example, to load UFO sources using defcon:
designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
designspace.loadSourceFonts(defcon.Font)
Or to load masters as FontTools binary fonts, including extra options:
designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False)
Args:
opener (Callable): takes one required positional argument, the source.path,
and optional keyword arguments, and returns a new font object
loaded from that path.
**kwargs: extra options passed on to the opener function.
Returns:
List of font objects in the order they appear in the sources list.
"""
# we load fonts with the same source.path only once
loaded = {}
fonts = []
for source in self.sources:
if source.font is not None: # font already loaded
fonts.append(source.font)
continue
if source.path in loaded:
source.font = loaded[source.path]
else:
if source.path is None:
raise DesignSpaceDocumentError(
"Designspace source '%s' has no 'path' attribute"
% (source.name or "<Unknown>")
)
source.font = opener(source.path, **kwargs)
loaded[source.path] = source.font
fonts.append(source.font)
return fonts
@property
def formatTuple(self):
"""Return the formatVersion as a tuple of (major, minor).
.. versionadded:: 5.0
"""
if self.formatVersion is None:
return (5, 0)
numbers = (int(i) for i in self.formatVersion.split("."))
major = next(numbers)
minor = next(numbers, 0)
return (major, minor)
def getVariableFonts(self) -> List[VariableFontDescriptor]:
"""Return all variable fonts defined in this document, or implicit
variable fonts that can be built from the document's continuous axes.
In the case of Designspace documents before version 5, the whole
document was implicitly describing a variable font that covers the
whole space.
In version 5 and above documents, there can be as many variable fonts
as there are locations on discrete axes.
.. seealso:: :func:`splitInterpolable`
.. versionadded:: 5.0
"""
if self.variableFonts:
return self.variableFonts
variableFonts = []
discreteAxes = []
rangeAxisSubsets: List[
Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]
] = []
for axis in self.axes:
if hasattr(axis, "values"):
# Mypy doesn't support narrowing union types via hasattr()
# TODO(Python 3.10): use TypeGuard
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
axis = cast(DiscreteAxisDescriptor, axis)
discreteAxes.append(axis) # type: ignore
else:
rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name))
valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
for values in valueCombinations:
basename = None
if self.filename is not None:
basename = os.path.splitext(self.filename)[0] + "-VF"
if self.path is not None:
basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF"
if basename is None:
basename = "VF"
axisNames = "".join(
[f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)]
)
variableFonts.append(
VariableFontDescriptor(
name=f"{basename}{axisNames}",
axisSubsets=rangeAxisSubsets
+ [
ValueAxisSubsetDescriptor(name=axis.name, userValue=value)
for axis, value in zip(discreteAxes, values)
],
)
)
return variableFonts
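# A minimal sketch of the implicit variable fonts derived from a discrete axis;
# the axes are illustrative placeholders and the "VF-..." names assume the
# document has neither a filename nor a path set:
#
#     doc = DesignSpaceDocument()
#     doc.addAxis(
#         AxisDescriptor(name="Weight", tag="wght", minimum=100, default=400, maximum=900)
#     )
#     doc.addAxis(
#         DiscreteAxisDescriptor(name="Italic", tag="ital", values=[0, 1], default=0)
#     )
#     [vf.name for vf in doc.getVariableFonts()]  # ['VF-ital0', 'VF-ital1']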
def deepcopyExceptFonts(self):
"""Allow deep-copying a DesignSpace document without deep-copying
attached UFO fonts or TTFont objects. The :attr:`font` attribute
is shared by reference between the original and the copy.
.. versionadded:: 5.0
"""
fonts = [source.font for source in self.sources]
try:
for source in self.sources:
source.font = None
res = copy.deepcopy(self)
for source, font in zip(res.sources, fonts):
source.font = font
return res
finally:
for source, font in zip(self.sources, fonts):
source.font = font
def main(args=None):
"""Roundtrip .designspace file through the DesignSpaceDocument class"""
if args is None:
import sys
args = sys.argv[1:]
from argparse import ArgumentParser
parser = ArgumentParser(prog="designspaceLib", description=main.__doc__)
parser.add_argument("input")
parser.add_argument("output")
options = parser.parse_args(args)
ds = DesignSpaceDocument.fromfile(options.input)
ds.write(options.output)
fontTools/designspaceLib/__main__.py
import sys
from fontTools.designspaceLib import main
if __name__ == "__main__":
sys.exit(main())
fontTools/designspaceLib/split.py
"""Allows building all the variable fonts of a DesignSpace version 5 by
splitting the document into interpolable sub-spaces, then into each VF.
"""
from __future__ import annotations
import itertools
import logging
import math
from typing import Any, Callable, Dict, Iterator, List, Tuple, cast
from fontTools.designspaceLib import (
AxisDescriptor,
AxisMappingDescriptor,
DesignSpaceDocument,
DiscreteAxisDescriptor,
InstanceDescriptor,
RuleDescriptor,
SimpleLocationDict,
SourceDescriptor,
VariableFontDescriptor,
)
from fontTools.designspaceLib.statNames import StatNames, getStatNames
from fontTools.designspaceLib.types import (
ConditionSet,
Range,
Region,
getVFUserRegion,
locationInRegion,
regionInRegion,
userRegionToDesignRegion,
)
LOGGER = logging.getLogger(__name__)
MakeInstanceFilenameCallable = Callable[
[DesignSpaceDocument, InstanceDescriptor, StatNames], str
]
def defaultMakeInstanceFilename(
doc: DesignSpaceDocument, instance: InstanceDescriptor, statNames: StatNames
) -> str:
"""Default callable to synthesize an instance filename
when makeNames=True, for instances that don't specify an instance name
in the designspace. This part of the name generation can be overridden
because it's not specified by the STAT table.
"""
familyName = instance.familyName or statNames.familyNames.get("en")
styleName = instance.styleName or statNames.styleNames.get("en")
return f"{familyName}-{styleName}.ttf"
def splitInterpolable(
doc: DesignSpaceDocument,
makeNames: bool = True,
expandLocations: bool = True,
makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename,
) -> Iterator[Tuple[SimpleLocationDict, DesignSpaceDocument]]:
"""Split the given DS5 into several interpolable sub-designspaces.
There are as many interpolable sub-spaces as there are combinations of
discrete axis values.
E.g. with axes:
- italic (discrete) Upright or Italic
- style (discrete) Sans or Serif
- weight (continuous) 100 to 900
There are 4 sub-spaces in which the Weight axis should interpolate:
(Upright, Sans), (Upright, Serif), (Italic, Sans) and (Italic, Serif).
The sub-designspaces still include the full axis definitions and STAT data,
but the rules, sources, variable fonts, and instances are trimmed down to only
keep what falls within the interpolable sub-space.
Args:
- ``makeNames``: Whether to compute the instance family and style
names using the STAT data.
- ``expandLocations``: Whether to turn all locations into "full"
locations, including implicit default axis values where missing.
- ``makeInstanceFilename``: Callable to synthesize an instance filename
when makeNames=True, for instances that don't specify an instance name
in the designspace. This part of the name generation can be overridden
because it's not specified by the STAT table.
.. versionadded:: 5.0
"""
discreteAxes = []
interpolableUserRegion: Region = {}
for axis in doc.axes:
if hasattr(axis, "values"):
# Mypy doesn't support narrowing union types via hasattr()
# TODO(Python 3.10): use TypeGuard
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
axis = cast(DiscreteAxisDescriptor, axis)
discreteAxes.append(axis)
else:
axis = cast(AxisDescriptor, axis)
interpolableUserRegion[axis.name] = Range(
axis.minimum,
axis.maximum,
axis.default,
)
valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
for values in valueCombinations:
discreteUserLocation = {
discreteAxis.name: value
for discreteAxis, value in zip(discreteAxes, values)
}
subDoc = _extractSubSpace(
doc,
{**interpolableUserRegion, **discreteUserLocation},
keepVFs=True,
makeNames=makeNames,
expandLocations=expandLocations,
makeInstanceFilename=makeInstanceFilename,
)
yield discreteUserLocation, subDoc
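# A minimal usage sketch; "MyFamily.designspace" is a placeholder for a format 5
# document with one or more discrete axes:
#
#     doc = DesignSpaceDocument.fromfile("MyFamily.designspace")
#     for discreteLocation, subDoc in splitInterpolable(doc):
#         print(discreteLocation, [source.filename for source in subDoc.sources])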
def splitVariableFonts(
doc: DesignSpaceDocument,
makeNames: bool = False,
expandLocations: bool = False,
makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename,
) -> Iterator[Tuple[str, DesignSpaceDocument]]:
"""Convert each variable font listed in this document into a standalone
designspace. This can be used to compile all the variable fonts from a
format 5 designspace using tools that can only deal with one VF at a time.
Args:
- ``makeNames``: Whether to compute the instance family and style
names using the STAT data.
- ``expandLocations``: Whether to turn all locations into "full"
locations, including implicit default axis values where missing.
- ``makeInstanceFilename``: Callable to synthesize an instance filename
when makeNames=True, for instances that don't specify an instance name
in the designspace. This part of the name generation can be overridden
because it's not specified by the STAT table.
.. versionadded:: 5.0
"""
# Make one format 5 DesignSpaceDocument for each variable font
for vf in doc.getVariableFonts():
vfUserRegion = getVFUserRegion(doc, vf)
vfDoc = _extractSubSpace(
doc,
vfUserRegion,
keepVFs=False,
makeNames=makeNames,
expandLocations=expandLocations,
makeInstanceFilename=makeInstanceFilename,
)
vfDoc.lib = {**vfDoc.lib, **vf.lib}
yield vf.name, vfDoc
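# A minimal usage sketch; "MyFamily.designspace" and the output filenames are
# placeholders:
#
#     doc = DesignSpaceDocument.fromfile("MyFamily.designspace")
#     for vfName, vfDoc in splitVariableFonts(doc):
#         vfDoc.write(f"{vfName}.designspace")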
def convert5to4(
doc: DesignSpaceDocument,
) -> Dict[str, DesignSpaceDocument]:
"""Convert each variable font listed in this document into a standalone
format 4 designspace. This can be used to compile all the variable fonts
from a format 5 designspace using tools that only know about format 4.
.. versionadded:: 5.0
"""
vfs = {}
for _location, subDoc in splitInterpolable(doc):
for vfName, vfDoc in splitVariableFonts(subDoc):
vfDoc.formatVersion = "4.1"
vfs[vfName] = vfDoc
return vfs
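# A minimal sketch of converting a format 5 document into one standalone
# format 4.1 document per variable font; paths are placeholders:
#
#     doc = DesignSpaceDocument.fromfile("MyFamily.designspace")
#     for vfName, vfDoc in convert5to4(doc).items():
#         assert vfDoc.formatVersion == "4.1"
#         vfDoc.write(f"{vfName}.v4.designspace")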
def _extractSubSpace(
doc: DesignSpaceDocument,
userRegion: Region,
*,
keepVFs: bool,
makeNames: bool,
expandLocations: bool,
makeInstanceFilename: MakeInstanceFilenameCallable,
) -> DesignSpaceDocument:
subDoc = DesignSpaceDocument()
# Don't include STAT info
# FIXME: (Jany) let's think about it. Not including it is OK for now because the
# point of the splitting is to build VFs, and we'll use the STAT data of the full
# document to generate the STAT of the VFs, so there is "no need" for STAT data
# in the sub-docs. Counterpoint: what if someone wants to split this DS for
# other purposes? Maybe for that it would be useful to also subset the STAT
# data?
# subDoc.elidedFallbackName = doc.elidedFallbackName
def maybeExpandDesignLocation(object):
if expandLocations:
return object.getFullDesignLocation(doc)
else:
return object.designLocation
for axis in doc.axes:
range = userRegion[axis.name]
if isinstance(range, Range) and hasattr(axis, "minimum"):
# Mypy doesn't support narrowing union types via hasattr()
# TODO(Python 3.10): use TypeGuard
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
axis = cast(AxisDescriptor, axis)
subDoc.addAxis(
AxisDescriptor(
# Same info
tag=axis.tag,
name=axis.name,
labelNames=axis.labelNames,
hidden=axis.hidden,
# Subset range
minimum=max(range.minimum, axis.minimum),
default=range.default or axis.default,
maximum=min(range.maximum, axis.maximum),
map=[
(user, design)
for user, design in axis.map
if range.minimum <= user <= range.maximum
],
# Don't include STAT info
axisOrdering=None,
axisLabels=None,
)
)
subDoc.axisMappings = mappings = []
subDocAxes = {axis.name for axis in subDoc.axes}
for mapping in doc.axisMappings:
if not all(axis in subDocAxes for axis in mapping.inputLocation.keys()):
continue
if not all(axis in subDocAxes for axis in mapping.outputLocation.keys()):
LOGGER.error(
"In axis mapping from input %s, some output axes are not in the variable-font: %s",
mapping.inputLocation,
mapping.outputLocation,
)
continue
mappingAxes = set()
mappingAxes.update(mapping.inputLocation.keys())
mappingAxes.update(mapping.outputLocation.keys())
for axis in doc.axes:
if axis.name not in mappingAxes:
continue
range = userRegion[axis.name]
if (
range.minimum != axis.minimum
or (range.default is not None and range.default != axis.default)
or range.maximum != axis.maximum
):
LOGGER.error(
"Limiting axis ranges used in <mapping> elements not supported: %s",
axis.name,
)
continue
mappings.append(
AxisMappingDescriptor(
inputLocation=mapping.inputLocation,
outputLocation=mapping.outputLocation,
)
)
# Don't include STAT info
# subDoc.locationLabels = doc.locationLabels
# Rules: subset them based on conditions
designRegion = userRegionToDesignRegion(doc, userRegion)
subDoc.rules = _subsetRulesBasedOnConditions(doc.rules, designRegion)
subDoc.rulesProcessingLast = doc.rulesProcessingLast
# Sources: keep only the ones that fall within the kept axis ranges
for source in doc.sources:
if not locationInRegion(doc.map_backward(source.designLocation), userRegion):
continue
subDoc.addSource(
SourceDescriptor(
filename=source.filename,
path=source.path,
font=source.font,
name=source.name,
designLocation=_filterLocation(
userRegion, maybeExpandDesignLocation(source)
),
layerName=source.layerName,
familyName=source.familyName,
styleName=source.styleName,
muteKerning=source.muteKerning,
muteInfo=source.muteInfo,
mutedGlyphNames=source.mutedGlyphNames,
)
)
# Copy family name translations from the old default source to the new default
vfDefault = subDoc.findDefault()
oldDefault = doc.findDefault()
if vfDefault is not None and oldDefault is not None:
vfDefault.localisedFamilyName = oldDefault.localisedFamilyName
# Variable fonts: keep only the ones that fall within the kept axis ranges
if keepVFs:
# Note: call getVariableFonts() to make the implicit VFs explicit
for vf in doc.getVariableFonts():
vfUserRegion = getVFUserRegion(doc, vf)
if regionInRegion(vfUserRegion, userRegion):
subDoc.addVariableFont(
VariableFontDescriptor(
name=vf.name,
filename=vf.filename,
axisSubsets=[
axisSubset
for axisSubset in vf.axisSubsets
if isinstance(userRegion[axisSubset.name], Range)
],
lib=vf.lib,
)
)
# Instances: same as Sources + compute missing names
for instance in doc.instances:
if not locationInRegion(instance.getFullUserLocation(doc), userRegion):
continue
if makeNames:
statNames = getStatNames(doc, instance.getFullUserLocation(doc))
familyName = instance.familyName or statNames.familyNames.get("en")
styleName = instance.styleName or statNames.styleNames.get("en")
subDoc.addInstance(
InstanceDescriptor(
filename=instance.filename
or makeInstanceFilename(doc, instance, statNames),
path=instance.path,
font=instance.font,
name=instance.name or f"{familyName} {styleName}",
userLocation={} if expandLocations else instance.userLocation,
designLocation=_filterLocation(
userRegion, maybeExpandDesignLocation(instance)
),
familyName=familyName,
styleName=styleName,
postScriptFontName=instance.postScriptFontName
or statNames.postScriptFontName,
styleMapFamilyName=instance.styleMapFamilyName
or statNames.styleMapFamilyNames.get("en"),
styleMapStyleName=instance.styleMapStyleName
or statNames.styleMapStyleName,
localisedFamilyName=instance.localisedFamilyName
or statNames.familyNames,
localisedStyleName=instance.localisedStyleName
or statNames.styleNames,
localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName
or statNames.styleMapFamilyNames,
localisedStyleMapStyleName=instance.localisedStyleMapStyleName
or {},
lib=instance.lib,
)
)
else:
subDoc.addInstance(
InstanceDescriptor(
filename=instance.filename,
path=instance.path,
font=instance.font,
name=instance.name,
userLocation={} if expandLocations else instance.userLocation,
designLocation=_filterLocation(
userRegion, maybeExpandDesignLocation(instance)
),
familyName=instance.familyName,
styleName=instance.styleName,
postScriptFontName=instance.postScriptFontName,
styleMapFamilyName=instance.styleMapFamilyName,
styleMapStyleName=instance.styleMapStyleName,
localisedFamilyName=instance.localisedFamilyName,
localisedStyleName=instance.localisedStyleName,
localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName,
localisedStyleMapStyleName=instance.localisedStyleMapStyleName,
lib=instance.lib,
)
)
subDoc.lib = doc.lib
return subDoc
def _conditionSetFrom(conditionSet: List[Dict[str, Any]]) -> ConditionSet:
c: Dict[str, Range] = {}
for condition in conditionSet:
minimum, maximum = condition.get("minimum"), condition.get("maximum")
c[condition["name"]] = Range(
minimum if minimum is not None else -math.inf,
maximum if maximum is not None else math.inf,
)
return c
def _subsetRulesBasedOnConditions(
rules: List[RuleDescriptor], designRegion: Region
) -> List[RuleDescriptor]:
# What rules to keep:
# - Keep the rule if any conditionset is relevant.
# - A conditionset is relevant if all conditions are relevant or it is empty.
# - A condition is relevant if
# - axis is point (C-AP),
# - and point in condition's range (C-AP-in)
# (in this case remove the condition because it's always true)
# - else (C-AP-out) whole conditionset can be discarded (condition false
# => conditionset false)
# - axis is range (C-AR),
# - (C-AR-all) and axis range fully contained in condition range: we can
# scrap the condition because it's always true
# - (C-AR-inter) and intersection(axis range, condition range) not empty:
# keep the condition with the smaller range (= intersection)
# - (C-AR-none) else, whole conditionset can be discarded
newRules: List[RuleDescriptor] = []
for rule in rules:
newRule: RuleDescriptor = RuleDescriptor(
name=rule.name, conditionSets=[], subs=rule.subs
)
for conditionset in rule.conditionSets:
cs = _conditionSetFrom(conditionset)
newConditionset: List[Dict[str, Any]] = []
discardConditionset = False
for selectionName, selectionValue in designRegion.items():
# TODO: Ensure that all(key in conditionset for key in region.keys())?
if selectionName not in cs:
# raise Exception("Selection has different axes than the rules")
continue
if isinstance(selectionValue, (float, int)): # is point
# Case C-AP-in
if selectionValue in cs[selectionName]:
pass # always matches, conditionset can stay empty for this one.
# Case C-AP-out
else:
discardConditionset = True
else: # is range
# Case C-AR-all
if selectionValue in cs[selectionName]:
pass # always matches, conditionset can stay empty for this one.
else:
intersection = cs[selectionName].intersection(selectionValue)
# Case C-AR-inter
if intersection is not None:
newConditionset.append(
{
"name": selectionName,
"minimum": intersection.minimum,
"maximum": intersection.maximum,
}
)
# Case C-AR-none
else:
discardConditionset = True
if not discardConditionset:
newRule.conditionSets.append(newConditionset)
if newRule.conditionSets:
newRules.append(newRule)
return newRules
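# A minimal sketch of the C-AR-inter case above; the rule, axis name and values
# are illustrative placeholders:
#
#     rule = RuleDescriptor(
#         name="bolder-dollar",
#         conditionSets=[[dict(name="Weight", minimum=500, maximum=900)]],
#         subs=[("dollar", "dollar.bold")],
#     )
#     _subsetRulesBasedOnConditions([rule], {"Weight": Range(400, 700)})
#     # -> the rule is kept, with its Weight condition clamped to
#     #    minimum=500, maximum=700 (the intersection of both ranges)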
def _filterLocation(
userRegion: Region,
location: Dict[str, float],
) -> Dict[str, float]:
return {
name: value
for name, value in location.items()
if name in userRegion and isinstance(userRegion[name], Range)
}
fontTools/designspaceLib/statNames.py
"""Compute name information for a given location in user-space coordinates
using STAT data. This can be used to automatically fill in the names of an
instance:
.. code:: python
instance = doc.instances[0]
names = getStatNames(doc, instance.getFullUserLocation(doc))
print(names.styleNames)
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import logging
from fontTools.designspaceLib import (
AxisDescriptor,
AxisLabelDescriptor,
DesignSpaceDocument,
DesignSpaceDocumentError,
DiscreteAxisDescriptor,
SimpleLocationDict,
SourceDescriptor,
)
LOGGER = logging.getLogger(__name__)
# TODO(Python 3.8): use Literal
# RibbiStyleName = Union[Literal["regular"], Literal["bold"], Literal["italic"], Literal["bold italic"]]
RibbiStyle = str
BOLD_ITALIC_TO_RIBBI_STYLE = {
(False, False): "regular",
(False, True): "italic",
(True, False): "bold",
(True, True): "bold italic",
}
@dataclass
class StatNames:
"""Name data generated from the STAT table information."""
familyNames: Dict[str, str]
styleNames: Dict[str, str]
postScriptFontName: Optional[str]
styleMapFamilyNames: Dict[str, str]
styleMapStyleName: Optional[RibbiStyle]
def getStatNames(
doc: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> StatNames:
"""Compute the family, style, PostScript names of the given ``userLocation``
using the document's STAT information.
Also computes localizations.
If not enough STAT data is available for a given name, either its dict of
localized names will be empty (family and style names), or the name will be
None (PostScript name).
.. versionadded:: 5.0
"""
familyNames: Dict[str, str] = {}
defaultSource: Optional[SourceDescriptor] = doc.findDefault()
if defaultSource is None:
LOGGER.warning("Cannot determine default source to look up family name.")
elif defaultSource.familyName is None:
LOGGER.warning(
"Cannot look up family name, assign the 'familyname' attribute to the default source."
)
else:
familyNames = {
"en": defaultSource.familyName,
**defaultSource.localisedFamilyName,
}
styleNames: Dict[str, str] = {}
# If a free-standing label matches the location, use it for name generation.
label = doc.labelForUserLocation(userLocation)
if label is not None:
styleNames = {"en": label.name, **label.labelNames}
# Otherwise, scour the axis labels for matches.
else:
# Gather all languages in which at least one translation is provided
# Then build names for all these languages, but fallback to English
# whenever a translation is missing.
labels = _getAxisLabelsForUserLocation(doc.axes, userLocation)
if labels:
languages = set(
language for label in labels for language in label.labelNames
)
languages.add("en")
for language in languages:
styleName = " ".join(
label.labelNames.get(language, label.defaultName)
for label in labels
if not label.elidable
)
if not styleName and doc.elidedFallbackName is not None:
styleName = doc.elidedFallbackName
styleNames[language] = styleName
if "en" not in familyNames or "en" not in styleNames:
# Not enough information to compute the PS name or styleMap names
return StatNames(
familyNames=familyNames,
styleNames=styleNames,
postScriptFontName=None,
styleMapFamilyNames={},
styleMapStyleName=None,
)
postScriptFontName = f"{familyNames['en']}-{styleNames['en']}".replace(" ", "")
styleMapStyleName, regularUserLocation = _getRibbiStyle(doc, userLocation)
styleNamesForStyleMap = styleNames
if regularUserLocation != userLocation:
regularStatNames = getStatNames(doc, regularUserLocation)
styleNamesForStyleMap = regularStatNames.styleNames
styleMapFamilyNames = {}
for language in set(familyNames).union(styleNames.keys()):
familyName = familyNames.get(language, familyNames["en"])
styleName = styleNamesForStyleMap.get(language, styleNamesForStyleMap["en"])
styleMapFamilyNames[language] = (familyName + " " + styleName).strip()
return StatNames(
familyNames=familyNames,
styleNames=styleNames,
postScriptFontName=postScriptFontName,
styleMapFamilyNames=styleMapFamilyNames,
styleMapStyleName=styleMapStyleName,
)
def _getSortedAxisLabels(
axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
) -> Dict[str, list[AxisLabelDescriptor]]:
"""Returns axis labels sorted by their ordering, with unordered ones appended as
they are listed."""
# First, get the axis labels with explicit ordering...
sortedAxes = sorted(
(axis for axis in axes if axis.axisOrdering is not None),
key=lambda a: a.axisOrdering,
)
sortedLabels: Dict[str, list[AxisLabelDescriptor]] = {
axis.name: axis.axisLabels for axis in sortedAxes
}
# ... then append the others in the order they appear.
# NOTE: This relies on Python 3.7+ dict's preserved insertion order.
for axis in axes:
if axis.axisOrdering is None:
sortedLabels[axis.name] = axis.axisLabels
return sortedLabels
def _getAxisLabelsForUserLocation(
axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
userLocation: SimpleLocationDict,
) -> list[AxisLabelDescriptor]:
labels: list[AxisLabelDescriptor] = []
allAxisLabels = _getSortedAxisLabels(axes)
if allAxisLabels.keys() != userLocation.keys():
LOGGER.warning(
f"Mismatch between user location '{userLocation.keys()}' and available "
f"labels for '{allAxisLabels.keys()}'."
)
for axisName, axisLabels in allAxisLabels.items():
userValue = userLocation[axisName]
label: Optional[AxisLabelDescriptor] = next(
(
l
for l in axisLabels
if l.userValue == userValue
or (
l.userMinimum is not None
and l.userMaximum is not None
and l.userMinimum <= userValue <= l.userMaximum
)
),
None,
)
if label is None:
LOGGER.debug(
f"Document needs a label for axis '{axisName}', user value '{userValue}'."
)
else:
labels.append(label)
return labels
def _getRibbiStyle(
self: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> Tuple[RibbiStyle, SimpleLocationDict]:
"""Compute the RIBBI style name of the given user location,
return the location of the matching Regular in the RIBBI group.
.. versionadded:: 5.0
"""
regularUserLocation = {}
axes_by_tag = {axis.tag: axis for axis in self.axes}
bold: bool = False
italic: bool = False
axis = axes_by_tag.get("wght")
if axis is not None:
for regular_label in axis.axisLabels:
if (
regular_label.linkedUserValue == userLocation[axis.name]
# In the "recursive" case where both the Regular has
# linkedUserValue pointing the Bold, and the Bold has
# linkedUserValue pointing to the Regular, only consider the
# first case: Regular (e.g. 400) has linkedUserValue pointing to
# Bold (e.g. 700, higher than Regular)
and regular_label.userValue < regular_label.linkedUserValue
):
regularUserLocation[axis.name] = regular_label.userValue
bold = True
break
axis = axes_by_tag.get("ital") or axes_by_tag.get("slnt")
if axis is not None:
for upright_label in axis.axisLabels:
if (
upright_label.linkedUserValue == userLocation[axis.name]
# In the "recursive" case where both the Upright has
# linkedUserValue pointing the Italic, and the Italic has
# linkedUserValue pointing to the Upright, only consider the
# first case: Upright (e.g. ital=0, slant=0) has
# linkedUserValue pointing to Italic (e.g ital=1, slant=-12 or
# slant=12 for backwards italics, in any case higher than
# Upright in absolute value, hence the abs() below.
and abs(upright_label.userValue) < abs(upright_label.linkedUserValue)
):
regularUserLocation[axis.name] = upright_label.userValue
italic = True
break
return BOLD_ITALIC_TO_RIBBI_STYLE[bold, italic], {
**userLocation,
**regularUserLocation,
}
fontTools/designspaceLib/types.py
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, List, Optional, Union, cast
from fontTools.designspaceLib import (
AxisDescriptor,
DesignSpaceDocument,
DesignSpaceDocumentError,
RangeAxisSubsetDescriptor,
SimpleLocationDict,
ValueAxisSubsetDescriptor,
VariableFontDescriptor,
)
def clamp(value, minimum, maximum):
return min(max(value, minimum), maximum)
@dataclass
class Range:
minimum: float
"""Inclusive minimum of the range."""
maximum: float
"""Inclusive maximum of the range."""
default: float = 0
"""Default value"""
def __post_init__(self):
self.minimum, self.maximum = sorted((self.minimum, self.maximum))
self.default = clamp(self.default, self.minimum, self.maximum)
def __contains__(self, value: Union[float, Range]) -> bool:
if isinstance(value, Range):
return self.minimum <= value.minimum and value.maximum <= self.maximum
return self.minimum <= value <= self.maximum
def intersection(self, other: Range) -> Optional[Range]:
if self.maximum < other.minimum or self.minimum > other.maximum:
return None
else:
return Range(
max(self.minimum, other.minimum),
min(self.maximum, other.maximum),
self.default, # We don't care about the default in this use-case
)
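# A minimal sketch of Range containment and intersection; the values are
# illustrative:
#
#     weight = Range(100, 900, default=400)
#     650 in weight                                  # True
#     Range(200, 300) in weight                      # True (fully contained)
#     weight.intersection(Range(700, 1000)).maximum  # 900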
# A region selection is either a range or a single value, as a Designspace v5
# axis-subset element only allows a single discrete value or a range for a
# variable-font element.
Region = Dict[str, Union[Range, float]]
# A conditionset is a set of named ranges.
ConditionSet = Dict[str, Range]
# A rule is a list of conditionsets; if any conditionset is relevant, the whole rule is relevant.
Rule = List[ConditionSet]
Rules = Dict[str, Rule]
def locationInRegion(location: SimpleLocationDict, region: Region) -> bool:
for name, value in location.items():
if name not in region:
return False
regionValue = region[name]
if isinstance(regionValue, (float, int)):
if value != regionValue:
return False
else:
if value not in regionValue:
return False
return True
def regionInRegion(region: Region, superRegion: Region) -> bool:
for name, value in region.items():
if name not in superRegion:
return False
superValue = superRegion[name]
if isinstance(superValue, (float, int)):
if value != superValue:
return False
else:
if value not in superValue:
return False
return True
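# A minimal sketch of these membership checks; axis names and values are
# illustrative:
#
#     region = {"Weight": Range(100, 900), "Italic": 0}
#     locationInRegion({"Weight": 400, "Italic": 0}, region)            # True
#     locationInRegion({"Weight": 400, "Italic": 1}, region)            # False
#     regionInRegion({"Weight": Range(400, 700), "Italic": 0}, region)  # True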
def userRegionToDesignRegion(doc: DesignSpaceDocument, userRegion: Region) -> Region:
designRegion = {}
for name, value in userRegion.items():
axis = doc.getAxis(name)
if axis is None:
raise DesignSpaceDocumentError(
f"Cannot find axis named '{name}' for region."
)
if isinstance(value, (float, int)):
designRegion[name] = axis.map_forward(value)
else:
designRegion[name] = Range(
axis.map_forward(value.minimum),
axis.map_forward(value.maximum),
axis.map_forward(value.default),
)
return designRegion
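# A minimal sketch of mapping a user-space region to design space through an
# axis map; the axis and its map are illustrative placeholders:
#
#     doc = DesignSpaceDocument()
#     doc.addAxis(
#         AxisDescriptor(
#             name="Weight", tag="wght", minimum=100, default=400, maximum=900,
#             map=[(100, 20), (400, 80), (900, 170)],
#         )
#     )
#     userRegionToDesignRegion(doc, {"Weight": Range(100, 900, 400)})
#     # -> {"Weight": Range(20, 170, ...)} in design-space coordinates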
def getVFUserRegion(doc: DesignSpaceDocument, vf: VariableFontDescriptor) -> Region:
vfUserRegion: Region = {}
# For each axis, 2 cases:
# - it has a range = it's an axis in the VF DS
# - it's a single location = use it to know which rules should apply in the VF
for axisSubset in vf.axisSubsets:
axis = doc.getAxis(axisSubset.name)
if axis is None:
raise DesignSpaceDocumentError(
f"Cannot find axis named '{axisSubset.name}' for variable font '{vf.name}'."
)
if hasattr(axisSubset, "userMinimum"):
# Mypy doesn't support narrowing union types via hasattr()
# TODO(Python 3.10): use TypeGuard
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
axisSubset = cast(RangeAxisSubsetDescriptor, axisSubset)
if not hasattr(axis, "minimum"):
raise DesignSpaceDocumentError(
f"Cannot select a range over '{axis.name}' for variable font '{vf.name}' "
"because it's a discrete axis, use only 'userValue' instead."
)
axis = cast(AxisDescriptor, axis)
vfUserRegion[axis.name] = Range(
max(axisSubset.userMinimum, axis.minimum),
min(axisSubset.userMaximum, axis.maximum),
axisSubset.userDefault or axis.default,
)
else:
axisSubset = cast(ValueAxisSubsetDescriptor, axisSubset)
vfUserRegion[axis.name] = axisSubset.userValue
# Any axis not mentioned explicitly has a single location = default value
for axis in doc.axes:
if axis.name not in vfUserRegion:
assert isinstance(
axis.default, (int, float)
), f"Axis '{axis.name}' has no valid default value."
vfUserRegion[axis.name] = axis.default
return vfUserRegion
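# A minimal sketch of the user-space region selected by a variable font; the
# axis and VF definitions are illustrative placeholders:
#
#     doc = DesignSpaceDocument()
#     doc.addAxis(
#         AxisDescriptor(name="Weight", tag="wght", minimum=100, default=400, maximum=900)
#     )
#     vf = VariableFontDescriptor(
#         name="MyFamily-Text-VF",
#         axisSubsets=[
#             RangeAxisSubsetDescriptor(name="Weight", userMinimum=400, userMaximum=700)
#         ],
#     )
#     getVFUserRegion(doc, vf)  # {'Weight': Range(minimum=400, maximum=700, default=400)}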
fontTools/encodings/MacRoman.py
MacRoman = [
"NUL",
"Eth",
"eth",
"Lslash",
"lslash",
"Scaron",
"scaron",
"Yacute",
"yacute",
"HT",
"LF",
"Thorn",
"thorn",
"CR",
"Zcaron",
"zcaron",
"DLE",
"DC1",
"DC2",
"DC3",
"DC4",
"onehalf",
"onequarter",
"onesuperior",
"threequarters",
"threesuperior",
"twosuperior",
"brokenbar",
"minus",
"multiply",
"RS",
"US",
"space",
"exclam",
"quotedbl",
"numbersign",
"dollar",
"percent",
"ampersand",
"quotesingle",
"parenleft",
"parenright",
"asterisk",
"plus",
"comma",
"hyphen",
"period",
"slash",
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"colon",
"semicolon",
"less",
"equal",
"greater",
"question",
"at",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"bracketleft",
"backslash",
"bracketright",
"asciicircum",
"underscore",
"grave",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"braceleft",
"bar",
"braceright",
"asciitilde",
"DEL",
"Adieresis",
"Aring",
"Ccedilla",
"Eacute",
"Ntilde",
"Odieresis",
"Udieresis",
"aacute",
"agrave",
"acircumflex",
"adieresis",
"atilde",
"aring",
"ccedilla",
"eacute",
"egrave",
"ecircumflex",
"edieresis",
"iacute",
"igrave",
"icircumflex",
"idieresis",
"ntilde",
"oacute",
"ograve",
"ocircumflex",
"odieresis",
"otilde",
"uacute",
"ugrave",
"ucircumflex",
"udieresis",
"dagger",
"degree",
"cent",
"sterling",
"section",
"bullet",
"paragraph",
"germandbls",
"registered",
"copyright",
"trademark",
"acute",
"dieresis",
"notequal",
"AE",
"Oslash",
"infinity",
"plusminus",
"lessequal",
"greaterequal",
"yen",
"mu",
"partialdiff",
"summation",
"product",
"pi",
"integral",
"ordfeminine",
"ordmasculine",
"Omega",
"ae",
"oslash",
"questiondown",
"exclamdown",
"logicalnot",
"radical",
"florin",
"approxequal",
"Delta",
"guillemotleft",
"guillemotright",
"ellipsis",
"nbspace",
"Agrave",
"Atilde",
"Otilde",
"OE",
"oe",
"endash",
"emdash",
"quotedblleft",
"quotedblright",
"quoteleft",
"quoteright",
"divide",
"lozenge",
"ydieresis",
"Ydieresis",
"fraction",
"currency",
"guilsinglleft",
"guilsinglright",
"fi",
"fl",
"daggerdbl",
"periodcentered",
"quotesinglbase",
"quotedblbase",
"perthousand",
"Acircumflex",
"Ecircumflex",
"Aacute",
"Edieresis",
"Egrave",
"Iacute",
"Icircumflex",
"Idieresis",
"Igrave",
"Oacute",
"Ocircumflex",
"apple",
"Ograve",
"Uacute",
"Ucircumflex",
"Ugrave",
"dotlessi",
"circumflex",
"tilde",
"macron",
"breve",
"dotaccent",
"ring",
"cedilla",
"hungarumlaut",
"ogonek",
"caron",
]