#
# (re)generate unicode property and type databases
#
# this script converts a unicode 3.2 database file to
# Modules/unicodedata_db.h, Modules/unicodename_db.h,
# and Objects/unicodetype_db.h
#
# history:
# 2000-09-24 fl   created (based on bits and pieces from unidb)
# 2000-09-25 fl   merged tim's splitbin fixes, separate decomposition table
# 2000-09-25 fl   added character type table
# 2000-09-26 fl   added LINEBREAK, DECIMAL, and DIGIT flags/fields (2.0)
# 2000-11-03 fl   expand first/last ranges
# 2001-01-19 fl   added character name tables (2.1)
# 2001-01-21 fl   added decomp compression; dynamic phrasebook threshold
# 2002-09-11 wd   use string methods
# 2002-10-18 mvl  update to Unicode 3.2
# 2002-10-22 mvl  generate NFC tables
# 2002-11-24 mvl  expand all ranges, sort names version-independently
# 2002-11-25 mvl  add UNIDATA_VERSION
# 2004-05-29 perky add east asian width information
# 2006-03-10 mvl  update to Unicode 4.1; add UCD 3.2 delta
#
# written by Fredrik Lundh (fredrik@pythonware.com)
#

import sys

SCRIPT = sys.argv[0]
30
VERSION = "2.6"
31

32
# The Unicode Database
33
UNIDATA_VERSION = "5.2.0"
34 35 36
UNICODE_DATA = "UnicodeData%s.txt"
COMPOSITION_EXCLUSIONS = "CompositionExclusions%s.txt"
EASTASIAN_WIDTH = "EastAsianWidth%s.txt"
37
UNIHAN = "Unihan%s.txt"
38
DERIVEDNORMALIZATION_PROPS = "DerivedNormalizationProps%s.txt"
39
LINE_BREAK = "LineBreak%s.txt"
40 41

old_versions = ["3.2.0"]
42 43 44 45 46 47 48 49 50 51

CATEGORY_NAMES = [ "Cn", "Lu", "Ll", "Lt", "Mn", "Mc", "Me", "Nd",
    "Nl", "No", "Zs", "Zl", "Zp", "Cc", "Cf", "Cs", "Co", "Cn", "Lm",
    "Lo", "Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po", "Sm", "Sc", "Sk",
    "So" ]

BIDIRECTIONAL_NAMES = [ "", "L", "LRE", "LRO", "R", "AL", "RLE", "RLO",
    "PDF", "EN", "ES", "ET", "AN", "CS", "NSM", "BN", "B", "S", "WS",
    "ON" ]

52 53
EASTASIANWIDTH_NAMES = [ "F", "H", "W", "Na", "A", "N" ]

54 55
MANDATORY_LINE_BREAKS = [ "BK", "CR", "LF", "NL" ]

56
# note: should match definitions in Objects/unicodectype.c
57 58 59 60
ALPHA_MASK = 0x01
DECIMAL_MASK = 0x02
DIGIT_MASK = 0x04
LOWER_MASK = 0x08
61
LINEBREAK_MASK = 0x10
62 63 64
SPACE_MASK = 0x20
TITLE_MASK = 0x40
UPPER_MASK = 0x80
65
NODELTA_MASK = 0x100
66
NUMERIC_MASK = 0x200
def maketables(trace=0):
69

70
    print "--- Reading", UNICODE_DATA % "", "..."
71

72 73 74
    version = ""
    unicode = UnicodeData(UNICODE_DATA % version,
                          COMPOSITION_EXCLUSIONS % version,
75
                          EASTASIAN_WIDTH % version,
76
                          UNIHAN % version,
77 78
                          DERIVEDNORMALIZATION_PROPS % version,
                          LINE_BREAK % version)
79

80 81
    print len(filter(None, unicode.table)), "characters"

82 83 84 85
    for version in old_versions:
        print "--- Reading", UNICODE_DATA % ("-"+version), "..."
        old_unicode = UnicodeData(UNICODE_DATA % ("-"+version),
                                  COMPOSITION_EXCLUSIONS % ("-"+version),
86 87
                                  EASTASIAN_WIDTH % ("-"+version),
                                  UNIHAN % ("-"+version))
88 89 90
        print len(filter(None, old_unicode.table)), "characters"
        merge_old_version(version, unicode, old_unicode)

91
    makeunicodename(unicode, trace)
92
    makeunicodedata(unicode, trace)
93
    makeunicodetype(unicode, trace)

# --------------------------------------------------------------------
# unicode character properties

def makeunicodedata(unicode, trace):

100
    dummy = (0, 0, 0, 0, 0, 0)
101 102 103 104
    table = [dummy]
    cache = {0: dummy}
    index = [0] * len(unicode.chars)

105 106 107 108
    FILE = "Modules/unicodedata_db.h"

    print "--- Preparing", FILE, "..."

109
    # 1) database properties
110

111 112 113 114 115 116 117 118
    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            # extract database properties
            category = CATEGORY_NAMES.index(record[2])
            combining = int(record[3])
            bidirectional = BIDIRECTIONAL_NAMES.index(record[4])
            mirrored = record[9] == "Y"
119
            eastasianwidth = EASTASIANWIDTH_NAMES.index(record[15])
120
            normalizationquickcheck = record[17]
121
            item = (
122 123
                category, combining, bidirectional, mirrored, eastasianwidth,
                normalizationquickcheck
124 125 126 127 128 129 130 131
                )
            # add entry to index and item tables
            i = cache.get(item)
            if i is None:
                cache[item] = i = len(table)
                table.append(item)
            index[char] = i

132 133
    # 2) decomposition data

134 135
    decomp_data = [0]
    decomp_prefix = [""]
136
    decomp_index = [0] * len(unicode.chars)
137
    decomp_size = 0
138

139 140 141 142
    comp_pairs = []
    comp_first = [None] * len(unicode.chars)
    comp_last = [None] * len(unicode.chars)

143 144 145 146
    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            if record[5]:
147
                decomp = record[5].split()
148 149
                if len(decomp) > 19:
                    raise Exception, "character %x has a decomposition too large for nfd_nfkd" % char
150 151 152 153 154 155 156 157 158 159 160 161 162
                # prefix
                if decomp[0][0] == "<":
                    prefix = decomp.pop(0)
                else:
                    prefix = ""
                try:
                    i = decomp_prefix.index(prefix)
                except ValueError:
                    i = len(decomp_prefix)
                    decomp_prefix.append(prefix)
                prefix = i
                assert prefix < 256
                # content
163
                decomp = [prefix + (len(decomp)<<8)] + [int(s, 16) for s in decomp]
164 165 166 167 168 169 170 171
                # Collect NFC pairs
                if not prefix and len(decomp) == 3 and \
                   char not in unicode.exclusions and \
                   unicode.table[decomp[1]][3] == "0":
                    p, l, r = decomp
                    comp_first[l] = 1
                    comp_last[r] = 1
                    comp_pairs.append((l,r,char))
172
                try:
173
                    i = decomp_data.index(decomp)
174 175
                except ValueError:
                    i = len(decomp_data)
176 177
                    decomp_data.extend(decomp)
                    decomp_size = decomp_size + len(decomp) * 2
178 179 180
            else:
                i = 0
            decomp_index[char] = i
181

182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
    f = l = 0
    comp_first_ranges = []
    comp_last_ranges = []
    prev_f = prev_l = None
    for i in unicode.chars:
        if comp_first[i] is not None:
            comp_first[i] = f
            f += 1
            if prev_f is None:
                prev_f = (i,i)
            elif prev_f[1]+1 == i:
                prev_f = prev_f[0],i
            else:
                comp_first_ranges.append(prev_f)
                prev_f = (i,i)
        if comp_last[i] is not None:
            comp_last[i] = l
            l += 1
            if prev_l is None:
                prev_l = (i,i)
            elif prev_l[1]+1 == i:
                prev_l = prev_l[0],i
            else:
                comp_last_ranges.append(prev_l)
                prev_l = (i,i)
    comp_first_ranges.append(prev_f)
    comp_last_ranges.append(prev_l)
    total_first = f
    total_last = l

    comp_data = [0]*(total_first*total_last)
    for f,l,char in comp_pairs:
        f = comp_first[f]
        l = comp_last[l]
        comp_data[f*total_last+l] = char

218
    print len(table), "unique properties"
219 220 221
    print len(decomp_prefix), "unique decomposition prefixes"
    print len(decomp_data), "unique decomposition entries:",
    print decomp_size, "bytes"
222 223 224
    print total_first, "first characters in NFC"
    print total_last, "last characters in NFC"
    print len(comp_pairs), "NFC pairs"
225

226 227
    print "--- Writing", FILE, "..."

228 229 230
    fp = open(FILE, "w")
    print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION)
    print >>fp
231
    print >>fp, '#define UNIDATA_VERSION "%s"' % UNIDATA_VERSION
232 233 234
    print >>fp, "/* a list of unique database records */"
    print >>fp, \
          "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {"
235
    for item in table:
236
        print >>fp, "    {%d, %d, %d, %d, %d, %d}," % item
237 238
    print >>fp, "};"
    print >>fp
239

240 241 242 243
    print >>fp, "/* Reindexing of NFC first characters. */"
    print >>fp, "#define TOTAL_FIRST",total_first
    print >>fp, "#define TOTAL_LAST",total_last
    print >>fp, "struct reindex{int start;short count,index;};"
244
    print >>fp, "static struct reindex nfc_first[] = {"
245 246 247 248
    for start,end in comp_first_ranges:
        print >>fp,"  { %d, %d, %d}," % (start,end-start,comp_first[start])
    print >>fp,"  {0,0,0}"
    print >>fp,"};\n"
249
    print >>fp, "static struct reindex nfc_last[] = {"
250 251 252 253 254
    for start,end in comp_last_ranges:
        print >>fp,"  { %d, %d, %d}," % (start,end-start,comp_last[start])
    print >>fp,"  {0,0,0}"
    print >>fp,"};\n"

255
    # FIXME: <fl> the following tables could be made static, and
256 257
    # the support code moved into unicodedatabase.c

258 259
    print >>fp, "/* string literals */"
    print >>fp, "const char *_PyUnicode_CategoryNames[] = {"
260
    for name in CATEGORY_NAMES:
261 262 263
        print >>fp, "    \"%s\"," % name
    print >>fp, "    NULL"
    print >>fp, "};"
264

265
    print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {"
266
    for name in BIDIRECTIONAL_NAMES:
267 268 269
        print >>fp, "    \"%s\"," % name
    print >>fp, "    NULL"
    print >>fp, "};"
270

271 272 273 274 275 276
    print >>fp, "const char *_PyUnicode_EastAsianWidthNames[] = {"
    for name in EASTASIANWIDTH_NAMES:
        print >>fp, "    \"%s\"," % name
    print >>fp, "    NULL"
    print >>fp, "};"

277 278
    print >>fp, "static const char *decomp_prefix[] = {"
    for name in decomp_prefix:
279 280 281
        print >>fp, "    \"%s\"," % name
    print >>fp, "    NULL"
    print >>fp, "};"
282

283
    # split record index table
284
    index1, index2, shift = splitbins(index, trace)
285

286 287
    print >>fp, "/* index tables for the database records */"
    print >>fp, "#define SHIFT", shift
288 289
    Array("index1", index1).dump(fp, trace)
    Array("index2", index2).dump(fp, trace)
290

291
    # split decomposition index table
292
    index1, index2, shift = splitbins(decomp_index, trace)
293

294 295 296
    print >>fp, "/* decomposition data */"
    Array("decomp_data", decomp_data).dump(fp, trace)

297 298
    print >>fp, "/* index tables for the decomposition data */"
    print >>fp, "#define DECOMP_SHIFT", shift
299 300
    Array("decomp_index1", index1).dump(fp, trace)
    Array("decomp_index2", index2).dump(fp, trace)
301

302 303 304 305 306 307
    index, index2, shift = splitbins(comp_data, trace)
    print >>fp, "/* NFC pairs */"
    print >>fp, "#define COMP_SHIFT", shift
    Array("comp_index", index).dump(fp, trace)
    Array("comp_data", index2).dump(fp, trace)

308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345
    # Generate delta tables for old versions
    for version, table, normalization in unicode.changed:
        cversion = version.replace(".","_")
        records = [table[0]]
        cache = {table[0]:0}
        index = [0] * len(table)
        for i, record in enumerate(table):
            try:
                index[i] = cache[record]
            except KeyError:
                index[i] = cache[record] = len(records)
                records.append(record)
        index1, index2, shift = splitbins(index, trace)
        print >>fp, "static const change_record change_records_%s[] = {" % cversion
        for record in records:
            print >>fp, "\t{ %s }," % ", ".join(map(str,record))
        print >>fp, "};"
        Array("changes_%s_index" % cversion, index1).dump(fp, trace)
        Array("changes_%s_data" % cversion, index2).dump(fp, trace)
        print >>fp, "static const change_record* get_change_%s(Py_UCS4 n)" % cversion
        print >>fp, "{"
        print >>fp, "\tint index;"
        print >>fp, "\tif (n >= 0x110000) index = 0;"
        print >>fp, "\telse {"
        print >>fp, "\t\tindex = changes_%s_index[n>>%d];" % (cversion, shift)
        print >>fp, "\t\tindex = changes_%s_data[(index<<%d)+(n & %d)];" % \
              (cversion, shift, ((1<<shift)-1))
        print >>fp, "\t}"
        print >>fp, "\treturn change_records_%s+index;" % cversion
        print >>fp, "}\n"
        print >>fp, "static Py_UCS4 normalization_%s(Py_UCS4 n)" % cversion
        print >>fp, "{"
        print >>fp, "\tswitch(n) {"
        for k, v in normalization:
            print >>fp, "\tcase %s: return 0x%s;" % (hex(k), v)
        print >>fp, "\tdefault: return 0;"
        print >>fp, "\t}\n}\n"

346 347 348 349 350 351 352 353 354 355
    fp.close()

# --------------------------------------------------------------------
# unicode character type tables

def makeunicodetype(unicode, trace):

    FILE = "Objects/unicodetype_db.h"

    print "--- Preparing", FILE, "..."
356 357

    # extract unicode types
358
    dummy = (0, 0, 0, 0, 0, 0)
359 360 361
    table = [dummy]
    cache = {0: dummy}
    index = [0] * len(unicode.chars)
362 363 364
    numeric = {}
    spaces = []
    linebreaks = []
365 366 367 368 369 370 371

    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            # extract database properties
            category = record[2]
            bidirectional = record[4]
372
            properties = record[16]
373
            flags = 0
374
            delta = True
375 376 377 378
            if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]:
                flags |= ALPHA_MASK
            if category == "Ll":
                flags |= LOWER_MASK
379
            if 'Line_Break' in properties or bidirectional == "B":
380
                flags |= LINEBREAK_MASK
381
                linebreaks.append(char)
382 383
            if category == "Zs" or bidirectional in ("WS", "B", "S"):
                flags |= SPACE_MASK
384
                spaces.append(char)
385
            if category == "Lt":
386 387 388
                flags |= TITLE_MASK
            if category == "Lu":
                flags |= UPPER_MASK
389
            # use delta predictor for upper/lower/title if it fits
390
            if record[12]:
391
                upper = int(record[12], 16)
392
            else:
393
                upper = char
394
            if record[13]:
395
                lower = int(record[13], 16)
396
            else:
397
                lower = char
398
            if record[14]:
399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415
                title = int(record[14], 16)
            else:
                # UCD.html says that a missing title char means that
                # it defaults to the uppercase character, not to the
                # character itself. Apparently, in the current UCD (5.x)
                # this feature is never used
                title = upper
            upper_d = upper - char
            lower_d = lower - char
            title_d = title - char
            if -32768 <= upper_d <= 32767 and \
               -32768 <= lower_d <= 32767 and \
               -32768 <= title_d <= 32767:
                # use deltas
                upper = upper_d & 0xffff
                lower = lower_d & 0xffff
                title = title_d & 0xffff
416
            else:
417
                flags |= NODELTA_MASK
418 419 420 421 422 423 424 425 426
            # decimal digit, integer digit
            decimal = 0
            if record[6]:
                flags |= DECIMAL_MASK
                decimal = int(record[6])
            digit = 0
            if record[7]:
                flags |= DIGIT_MASK
                digit = int(record[7])
427 428 429
            if record[8]:
                flags |= NUMERIC_MASK
                numeric.setdefault(record[8], []).append(char)
430
            item = (
431
                upper, lower, title, decimal, digit, flags
432 433 434 435 436 437 438 439
                )
            # add entry to index and item tables
            i = cache.get(item)
            if i is None:
                cache[item] = i = len(table)
                table.append(item)
            index[char] = i

440
    print len(table), "unique character type entries"
441 442 443
    print sum(map(len, numeric.values())), "numeric code points"
    print len(spaces), "whitespace code points"
    print len(linebreaks), "linebreak code points"
444

445 446
    print "--- Writing", FILE, "..."

447
    fp = open(FILE, "w")
448 449 450 451
    print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION)
    print >>fp
    print >>fp, "/* a list of unique character type descriptors */"
    print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {"
452
    for item in table:
453 454 455
        print >>fp, "    {%d, %d, %d, %d, %d, %d}," % item
    print >>fp, "};"
    print >>fp
456 457

    # split decomposition index table
458
    index1, index2, shift = splitbins(index, trace)
459

460 461
    print >>fp, "/* type indexes */"
    print >>fp, "#define SHIFT", shift
462 463
    Array("index1", index1).dump(fp, trace)
    Array("index2", index2).dump(fp, trace)
464

465
    # Generate code for _PyUnicode_ToNumeric()
466
    numeric_items = sorted(numeric.items())
467 468 469 470 471 472 473
    print >>fp, '/* Returns the numeric value as double for Unicode characters'
    print >>fp, ' * having this property, -1.0 otherwise.'
    print >>fp, ' */'
    print >>fp, 'double _PyUnicode_ToNumeric(Py_UNICODE ch)'
    print >>fp, '{'
    print >>fp, '    switch (ch) {'
    for value, codepoints in numeric_items:
474 475 476 477 478
        # Turn text into float literals
        parts = value.split('/')
        parts = [repr(float(part)) for part in parts]
        value = '/'.join(parts)

479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511
        haswide = False
        hasnonewide = False
        codepoints.sort()
        for codepoint in codepoints:
            if codepoint < 0x10000:
                hasnonewide = True
            if codepoint >= 0x10000 and not haswide:
                print >>fp, '#ifdef Py_UNICODE_WIDE'
                haswide = True
            print >>fp, '    case 0x%04X:' % (codepoint,)
        if haswide and hasnonewide:
            print >>fp, '#endif'
        print >>fp, '        return (double) %s;' % (value,)
        if haswide and not hasnonewide:
            print >>fp, '#endif'
    print >>fp,'    }'
    print >>fp,'    return -1.0;'
    print >>fp,'}'
    print >>fp

    # Generate code for _PyUnicode_IsWhitespace()
    print >>fp, "/* Returns 1 for Unicode characters having the bidirectional"
    print >>fp, " * type 'WS', 'B' or 'S' or the category 'Zs', 0 otherwise."
    print >>fp, " */"
    print >>fp, 'int _PyUnicode_IsWhitespace(register const Py_UNICODE ch)'
    print >>fp, '{'
    print >>fp, '#ifdef WANT_WCTYPE_FUNCTIONS'
    print >>fp, '    return iswspace(ch);'
    print >>fp, '#else'
    print >>fp, '    switch (ch) {'

    haswide = False
    hasnonewide = False
512
    for codepoint in sorted(spaces):
513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531
        if codepoint < 0x10000:
            hasnonewide = True
        if codepoint >= 0x10000 and not haswide:
            print >>fp, '#ifdef Py_UNICODE_WIDE'
            haswide = True
        print >>fp, '    case 0x%04X:' % (codepoint,)
    if haswide and hasnonewide:
        print >>fp, '#endif'
    print >>fp, '        return 1;'
    if haswide and not hasnonewide:
        print >>fp, '#endif'

    print >>fp,'    }'
    print >>fp,'    return 0;'
    print >>fp, '#endif'
    print >>fp,'}'
    print >>fp

    # Generate code for _PyUnicode_IsLinebreak()
532 533 534
    print >>fp, "/* Returns 1 for Unicode characters having the line break"
    print >>fp, " * property 'BK', 'CR', 'LF' or 'NL' or having bidirectional"
    print >>fp, " * type 'B', 0 otherwise."
535 536 537 538 539 540
    print >>fp, " */"
    print >>fp, 'int _PyUnicode_IsLinebreak(register const Py_UNICODE ch)'
    print >>fp, '{'
    print >>fp, '    switch (ch) {'
    haswide = False
    hasnonewide = False
541
    for codepoint in sorted(linebreaks):
542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558
        if codepoint < 0x10000:
            hasnonewide = True
        if codepoint >= 0x10000 and not haswide:
            print >>fp, '#ifdef Py_UNICODE_WIDE'
            haswide = True
        print >>fp, '    case 0x%04X:' % (codepoint,)
    if haswide and hasnonewide:
        print >>fp, '#endif'
    print >>fp, '        return 1;'
    if haswide and not hasnonewide:
        print >>fp, '#endif'

    print >>fp,'    }'
    print >>fp,'    return 0;'
    print >>fp,'}'
    print >>fp

559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604
    fp.close()

# --------------------------------------------------------------------
# unicode name database

def makeunicodename(unicode, trace):

    FILE = "Modules/unicodename_db.h"

    print "--- Preparing", FILE, "..."

    # collect names
    names = [None] * len(unicode.chars)

    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            name = record[1].strip()
            if name and name[0] != "<":
                names[char] = name + chr(0)

    print len(filter(lambda n: n is not None, names)), "distinct names"

    # collect unique words from names (note that we differ between
    # words inside a sentence, and words ending a sentence.  the
    # latter includes the trailing null byte.

    words = {}
    n = b = 0
    for char in unicode.chars:
        name = names[char]
        if name:
            w = name.split()
            b = b + len(name)
            n = n + len(w)
            for w in w:
                l = words.get(w)
                if l:
                    l.append(None)
                else:
                    words[w] = [len(words)]

    print n, "words in text;", b, "bytes"

    wordlist = words.items()

605
    # sort on falling frequency, then by name
606 607 608 609
    def word_key(a):
        aword, alist = a
        return -len(alist), aword
    wordlist.sort(key=word_key)
610

611 612 613 614 615 616 617 618 619 620 621 622
    # figure out how many phrasebook escapes we need
    escapes = 0
    while escapes * 256 < len(wordlist):
        escapes = escapes + 1
    print escapes, "escapes"

    short = 256 - escapes

    assert short > 0

    print short, "short indexes in lexicon"

623 624
    # statistics
    n = 0
625
    for i in range(short):
626
        n = n + len(wordlist[i][1])
627
    print n, "short indexes in phrasebook"
628

629 630
    # pick the most commonly used words, and sort the rest on falling
    # length (to maximize overlap)
631

632
    wordlist, wordtail = wordlist[:short], wordlist[short:]
633
    wordtail.sort(key=lambda a: a[0], reverse=True)
634 635 636 637 638 639 640 641 642 643 644 645 646 647 648
    wordlist.extend(wordtail)

    # generate lexicon from words

    lexicon_offset = [0]
    lexicon = ""
    words = {}

    # build a lexicon string
    offset = 0
    for w, x in wordlist:
        # encoding: bit 7 indicates last character in word (chr(128)
        # indicates the last character in an entire string)
        ww = w[:-1] + chr(ord(w[-1])+128)
        # reuse string tails, when possible
649
        o = lexicon.find(ww)
650 651 652 653 654
        if o < 0:
            o = offset
            lexicon = lexicon + ww
            offset = offset + len(w)
        words[w] = len(lexicon_offset)
655
        lexicon_offset.append(o)
656 657 658 659 660 661 662 663 664 665 666 667 668

    lexicon = map(ord, lexicon)

    # generate phrasebook from names and lexicon
    phrasebook = [0]
    phrasebook_offset = [0] * len(unicode.chars)
    for char in unicode.chars:
        name = names[char]
        if name:
            w = name.split()
            phrasebook_offset[char] = len(phrasebook)
            for w in w:
                i = words[w]
669 670
                if i < short:
                    phrasebook.append(i)
671
                else:
672 673
                    # store as two bytes
                    phrasebook.append((i>>8) + short)
674 675
                    phrasebook.append(i&255)

676 677
    assert getsize(phrasebook) == 1

678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703
    #
    # unicode name hash table

    # extract names
    data = []
    for char in unicode.chars:
        record = unicode.table[char]
        if record:
            name = record[1].strip()
            if name and name[0] != "<":
                data.append((name, char))

    # the magic number 47 was chosen to minimize the number of
    # collisions on the current data set.  if you like, change it
    # and see what happens...

    codehash = Hash("code", data, 47)

    print "--- Writing", FILE, "..."

    fp = open(FILE, "w")
    print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION)
    print >>fp
    print >>fp, "#define NAME_MAXLEN", 256
    print >>fp
    print >>fp, "/* lexicon */"
704 705
    Array("lexicon", lexicon).dump(fp, trace)
    Array("lexicon_offset", lexicon_offset).dump(fp, trace)
706 707 708 709 710 711

    # split decomposition index table
    offset1, offset2, shift = splitbins(phrasebook_offset, trace)

    print >>fp, "/* code->name phrasebook */"
    print >>fp, "#define phrasebook_shift", shift
712
    print >>fp, "#define phrasebook_short", short
713

714 715 716
    Array("phrasebook", phrasebook).dump(fp, trace)
    Array("phrasebook_offset1", offset1).dump(fp, trace)
    Array("phrasebook_offset2", offset2).dump(fp, trace)
717 718

    print >>fp, "/* name->code dictionary */"
719
    codehash.dump(fp, trace)
720 721 722

    fp.close()
def merge_old_version(version, new, old):
    # Changes to exclusion file not implemented yet
    if old.exclusions != new.exclusions:
        raise NotImplementedError, "exclusions differ"

    # In these change records, 0xFF means "no change"
    bidir_changes = [0xFF]*0x110000
    category_changes = [0xFF]*0x110000
    decimal_changes = [0xFF]*0x110000
733
    mirrored_changes = [0xFF]*0x110000
734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776
    # In numeric data, 0 means "no change",
    # -1 means "did not have a numeric value
    numeric_changes = [0] * 0x110000
    # normalization_changes is a list of key-value pairs
    normalization_changes = []
    for i in range(0x110000):
        if new.table[i] is None:
            # Characters unassigned in the new version ought to
            # be unassigned in the old one
            assert old.table[i] is None
            continue
        # check characters unassigned in the old version
        if old.table[i] is None:
            # category 0 is "unassigned"
            category_changes[i] = 0
            continue
        # check characters that differ
        if old.table[i] != new.table[i]:
            for k in range(len(old.table[i])):
                if old.table[i][k] != new.table[i][k]:
                    value = old.table[i][k]
                    if k == 2:
                        #print "CATEGORY",hex(i), old.table[i][k], new.table[i][k]
                        category_changes[i] = CATEGORY_NAMES.index(value)
                    elif k == 4:
                        #print "BIDIR",hex(i), old.table[i][k], new.table[i][k]
                        bidir_changes[i] = BIDIRECTIONAL_NAMES.index(value)
                    elif k == 5:
                        #print "DECOMP",hex(i), old.table[i][k], new.table[i][k]
                        # We assume that all normalization changes are in 1:1 mappings
                        assert " " not in value
                        normalization_changes.append((i, value))
                    elif k == 6:
                        #print "DECIMAL",hex(i), old.table[i][k], new.table[i][k]
                        # we only support changes where the old value is a single digit
                        assert value in "0123456789"
                        decimal_changes[i] = int(value)
                    elif k == 8:
                        # print "NUMERIC",hex(i), `old.table[i][k]`, new.table[i][k]
                        # Since 0 encodes "no change", the old value is better not 0
                        if not value:
                            numeric_changes[i] = -1
                        else:
777 778
                            numeric_changes[i] = float(value)
                            assert numeric_changes[i] not in (0, -1)
779 780 781 782 783
                    elif k == 9:
                        if value == 'Y':
                            mirrored_changes[i] = '1'
                        else:
                            mirrored_changes[i] = '0'
784 785 786 787 788 789 790 791 792 793 794 795
                    elif k == 11:
                        # change to ISO comment, ignore
                        pass
                    elif k == 12:
                        # change to simple uppercase mapping; ignore
                        pass
                    elif k == 13:
                        # change to simple lowercase mapping; ignore
                        pass
                    elif k == 14:
                        # change to simple titlecase mapping; ignore
                        pass
796 797 798
                    elif k == 16:
                        # change to properties; not yet
                        pass
799 800 801 802
                    else:
                        class Difference(Exception):pass
                        raise Difference, (hex(i), k, old.table[i], new.table[i])
    new.changed.append((version, zip(bidir_changes, category_changes,
803 804
                                     decimal_changes, mirrored_changes,
                                     numeric_changes),
805
                        normalization_changes))
# --------------------------------------------------------------------
# the following support code is taken from the unidb utilities
# Copyright (c) 1999-2000 by Secret Labs AB

# load a unicode-data file from disk

class UnicodeData:
    """In-memory copy of the Unicode character database.

    Loads UnicodeData.txt plus the auxiliary data files and merges
    everything into self.table: one record (a list of strings, layout
    below) per code point, or None for unassigned code points.

    Record structure:
    [ID, name, category, combining, bidi, decomp,  (6)
     decimal, digit, numeric, bidi-mirrored, Unicode-1-name, (11)
     ISO-comment, uppercase, lowercase, titlecase, ea-width, (16)
     properties] (17)
    """

    def _readlines(self, path):
        # Read a whole data file and return its lines.  Unlike the bare
        # open() calls this replaces, the file handle is always closed.
        f = open(path)
        try:
            return f.readlines()
        finally:
            f.close()

    def __init__(self, filename, exclusions, eastasianwidth, unihan,
                 derivednormalizationprops=None, linebreakprops=None,
                 expand=1):
        self.changed = []
        table = [None] * 0x110000
        # main database: one semicolon-separated record per code point,
        # keyed by the hexadecimal code point in field 0
        for s in self._readlines(filename):
            s = s.strip().split(";")
            char = int(s[0], 16)
            table[char] = s

        # expand first-last ranges: every code point between a
        # "<..., First>" and "<..., Last>" record receives a copy of the
        # First record, with its own code point and an empty name
        if expand:
            field = None
            for i in range(0, 0x110000):
                s = table[i]
                if s:
                    if s[1][-6:] == "First>":
                        s[1] = ""
                        field = s
                    elif s[1][-5:] == "Last>":
                        s[1] = ""
                        field = None
                elif field:
                    f2 = field[:]
                    f2[0] = "%X" % i
                    table[i] = f2

        # public attributes
        self.filename = filename
        self.table = table
        self.chars = range(0x110000) # unicode 3.2

        # composition exclusions: one code point per non-comment line
        self.exclusions = {}
        for s in self._readlines(exclusions):
            s = s.strip()
            if not s:
                continue
            if s[0] == '#':
                continue
            char = int(s.split()[0],16)
            self.exclusions[char] = 1

        # east asian width: "code;width" or "first..last;width" lines;
        # the width is appended as an extra field to every record
        widths = [None] * 0x110000
        for s in self._readlines(eastasianwidth):
            s = s.strip()
            if not s:
                continue
            if s[0] == '#':
                continue
            s = s.split()[0].split(';')
            if '..' in s[0]:
                first, last = [int(c, 16) for c in s[0].split('..')]
                chars = range(first, last+1)
            else:
                chars = [int(s[0], 16)]
            for char in chars:
                widths[char] = s[1]
        for i in range(0, 0x110000):
            if table[i] is not None:
                table[i].append(widths[i])

        # properties set, appended as another extra field; currently it
        # only ever holds 'Line_Break' for mandatory-break characters
        for i in range(0, 0x110000):
            if table[i] is not None:
                table[i].append(set())
        if linebreakprops:
            for s in self._readlines(linebreakprops):
                s = s.partition('#')[0]
                s = [i.strip() for i in s.split(';')]
                if len(s) < 2 or s[1] not in MANDATORY_LINE_BREAKS:
                    continue
                if '..' not in s[0]:
                    first = last = int(s[0], 16)
                else:
                    first, last = [int(c, 16) for c in s[0].split('..')]
                for char in range(first, last+1):
                    table[char][-1].add('Line_Break')

        # normalization quick-check flags, packed two bits per form in
        # the order NFD, NFKD, NFC, NFKC (0=Yes, 1=Maybe, 2=No) and
        # appended as an extra integer field
        if derivednormalizationprops:
            quickchecks = [0] * 0x110000 # default is Yes
            qc_order = 'NFD_QC NFKD_QC NFC_QC NFKC_QC'.split()
            for s in self._readlines(derivednormalizationprops):
                if '#' in s:
                    s = s[:s.index('#')]
                s = [i.strip() for i in s.split(';')]
                if len(s) < 2 or s[1] not in qc_order:
                    continue
                quickcheck = 'MN'.index(s[2]) + 1 # Maybe or No
                quickcheck_shift = qc_order.index(s[1])*2
                quickcheck <<= quickcheck_shift
                if '..' not in s[0]:
                    first = last = int(s[0], 16)
                else:
                    first, last = [int(c, 16) for c in s[0].split('..')]
                for char in range(first, last+1):
                    # each form must be set at most once per character
                    assert not (quickchecks[char]>>quickcheck_shift)&3
                    quickchecks[char] |= quickcheck
            for i in range(0, 0x110000):
                if table[i] is not None:
                    table[i].append(quickchecks[i])

        # Unihan numeric values: patch the numeric field (8) of CJK
        # ideographs carrying one of the three numeric tags
        for line in self._readlines(unihan):
            if not line.startswith('U+'):
                continue
            code, tag, value = line.split(None, 3)[:3]
            if tag not in ('kAccountingNumeric', 'kPrimaryNumeric',
                           'kOtherNumeric'):
                continue
            value = value.strip().replace(',', '')
            i = int(code[2:], 16)
            # Patch the numeric field
            if table[i] is not None:
                table[i][8] = value

    def uselatin1(self):
        # restrict character range to ISO Latin 1
        self.chars = range(256)

943 944 945 946 947 948 949 950
# hash table tools

# this is a straight-forward reimplementation of Python's built-in
# dictionary type, using a static data structure, and a custom string
# hash algorithm.

def myhash(s, magic):
    """Hash the string *s* case-insensitively with multiplier *magic*.

    A 24-bit multiplicative hash: overflow above bit 24 is folded back
    into the low bits.  The generated C lookup code implements the same
    function, so the two must stay in sync.
    """
    h = 0
    for c in map(ord, s.upper()):
        h = (h * magic) + c
        # 0xff000000 instead of the Python 2-only literal 0xff000000L:
        # the value is identical, but the 'L' suffix is a syntax error
        # under Python 3
        ix = h & 0xff000000
        if ix:
            h = (h ^ ((ix>>24) & 0xff)) & 0x00ffffff
    return h

# Candidate hash-table sizes (powers of two) paired with the polynomial
# constant used for that size; Hash.__init__ picks the first size with
# room for all entries and folds the polynomial into its probe sequence.
SIZES = [
    (4,3), (8,3), (16,3), (32,5), (64,3), (128,3), (256,29), (512,17),
    (1024,9), (2048,5), (4096,83), (8192,27), (16384,43), (32768,3),
    (65536,45), (131072,9), (262144,39), (524288,39), (1048576,9),
    (2097152,5), (4194304,3), (8388608,33), (16777216,27)
]

class Hash:
    """Static hash table builder.

    Turns a (key, value) list into an open-addressing hash table (keys
    hashed with myhash and multiplier *magic*), held as an Array ready
    to be dumped into the generated C source.
    """

    def __init__(self, name, data, magic):
        # turn a (key, value) list into a static hash table structure

        # determine table size: the smallest power of two with room for
        # all entries; size+poly doubles as the probe polynomial below
        for size, poly in SIZES:
            if size > len(data):
                poly = size + poly
                break
        else:
            raise AssertionError, "ran out of polynominals"

        print size, "slots in hash table"

        table = [None] * size

        mask = size-1

        n = 0

        hash = myhash

        # initialize hash table
        for key, value in data:
            h = hash(key, magic)
            i = (~h) & mask
            v = table[i]
            if v is None:
                table[i] = value
                continue
            # slot taken: probe with an increment derived from the hash,
            # evolving it via the polynomial on wrap-around
            incr = (h ^ (h >> 3)) & mask;
            if not incr:
                incr = mask
            while 1:
                n = n + 1   # total collision count, reported below
                i = (i + incr) & mask
                v = table[i]
                if v is None:
                    table[i] = value
                    break
                incr = incr << 1
                if incr > mask:
                    incr = incr ^ poly

        print n, "collisions"
        self.collisions = n

        # empty slots are encoded as value 0 in the emitted array
        for i in range(len(table)):
            if table[i] is None:
                table[i] = 0

        self.data = Array(name + "_hash", table)
        self.magic = magic
        self.name = name
        self.size = size
        self.poly = poly

    def dump(self, file, trace):
        # write the table to file as a C array, together with the
        # #define constants (magic, size, poly) the C lookup code needs
        self.data.dump(file, trace)
        file.write("#define %s_magic %d\n" % (self.name, self.magic))
        file.write("#define %s_size %d\n" % (self.name, self.size))
        file.write("#define %s_poly %d\n" % (self.name, self.poly))

1029 1030 1031 1032 1033 1034 1035 1036
# stuff to deal with arrays of unsigned integers

class Array:
    """A named array of unsigned integers, dumpable as C source."""

    def __init__(self, name, data):
        self.name = name
        self.data = data

    def dump(self, file, trace=0):
        # write data to file, as a C array
        size = getsize(self.data)
        if trace:
            print >>sys.stderr, self.name+":", size*len(self.data), "bytes"
        file.write("static ")
        # pick the narrowest C type wide enough for the largest value
        if size == 1:
            file.write("unsigned char")
        elif size == 2:
            file.write("unsigned short")
        else:
            file.write("unsigned int")
        file.write(" " + self.name + "[] = {\n")
        if self.data:
            s = "    "
            for item in self.data:
                i = str(item) + ", "
                # flush the pending line once it would exceed 78 columns
                if len(s) + len(i) > 78:
                    file.write(s + "\n")
                    s = "    " + i
                else:
                    s = s + i
            # write whatever remains buffered (skip a whitespace-only tail)
            if s.strip():
                file.write(s + "\n")
        file.write("};\n\n")

def getsize(data):
    """Return the smallest C integer width (1, 2 or 4 bytes) that can
    hold every value in *data*."""
    biggest = max(data)
    for width, limit in ((1, 256), (2, 65536)):
        if biggest < limit:
            return width
    return 4

1073 1074 1075 1076 1077 1078 1079 1080 1081 1082
def splitbins(t, trace=0):
    """t, trace=0 -> (t1, t2, shift).  Split a table to save space.

    t is a sequence of ints.  This function can be useful to save space if
    many of the ints are the same.  t1 and t2 are lists of ints, and shift
    is an int, chosen to minimize the combined size of t1 and t2 (in C
    code), and where for each i in range(len(t)),
        t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
    where mask is a bitmask isolating the last "shift" bits.

    If optional arg trace is non-zero (default zero), progress info
    is printed to sys.stderr.  The higher the value, the more info
    you'll get.
    """

    if trace:
        # dump is only defined (and only called) when tracing is on
        def dump(t1, t2, shift, bytes):
            print >>sys.stderr, "%d+%d bins at shift %d; %d bytes" % (
                len(t1), len(t2), shift, bytes)
        print >>sys.stderr, "Size of original table:", len(t)*getsize(t), \
                            "bytes"
    n = len(t)-1    # last valid index
    maxshift = 0    # the most we can shift n and still have something left
    if n > 0:
        while n >> 1:
            n >>= 1
            maxshift += 1
    del n
    bytes = sys.maxint  # smallest total size so far
    t = tuple(t)    # so slices can be dict keys
    # try every bin size 2**shift and keep the cheapest decomposition
    for shift in range(maxshift + 1):
        t1 = []
        t2 = []
        size = 2**shift
        # maps bin contents -> start index in t2, so identical bins
        # are stored only once and shared between t1 entries
        bincache = {}
        for i in range(0, len(t), size):
            bin = t[i:i+size]
            index = bincache.get(bin)
            if index is None:
                index = len(t2)
                bincache[bin] = index
                t2.extend(bin)
            # store index pre-shifted right; the C lookup shifts it back
            t1.append(index >> shift)
        # determine memory size
        b = len(t1)*getsize(t1) + len(t2)*getsize(t2)
        if trace > 1:
            dump(t1, t2, shift, b)
        if b < bytes:
            best = t1, t2, shift
            bytes = b
    t1, t2, shift = best
    if trace:
        print >>sys.stderr, "Best:",
        dump(t1, t2, shift, bytes)
    if __debug__:
        # exhaustively verify that the decomposition is correct
        mask = ~((~0) << shift) # i.e., low-bit mask of shift bits
        for i in xrange(len(t)):
            assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
    return best
1133 1134

if __name__ == "__main__":
    # Regenerate all database files.  maketables is defined earlier in
    # this file; the argument presumably enables trace output -- confirm
    # against its signature.
    maketables(1)