#
# (re)generate unicode property and type databases
#
# this script converts a unicode 4.1 database file to
# Modules/unicodedata_db.h, Modules/unicodename_db.h,
# and Objects/unicodetype_db.h
#
# history:
# 2000-09-24 fl created (based on bits and pieces from unidb)
# 2000-09-25 fl merged tim's splitbin fixes, separate decomposition table
# 2000-09-25 fl added character type table
# 2000-09-26 fl added LINEBREAK, DECIMAL, and DIGIT flags/fields (2.0)
# 2000-11-03 fl expand first/last ranges
# 2001-01-19 fl added character name tables (2.1)
# 2001-01-21 fl added decomp compression; dynamic phrasebook threshold
# 2002-09-11 wd use string methods
# 2002-10-18 mvl update to Unicode 3.2
# 2002-10-22 mvl generate NFC tables
# 2002-11-24 mvl expand all ranges, sort names version-independently
# 2002-11-25 mvl add UNIDATA_VERSION
# 2004-05-29 perky add east asian width information
# 2006-03-10 mvl update to Unicode 4.1; add UCD 3.2 delta
#
# written by Fredrik Lundh (fredrik@pythonware.com)
#
import re
import sys
SCRIPT = sys.argv[0]
VERSION = "2.5"
# The Unicode Database
UNIDATA_VERSION = "4.1.0"
UNICODE_DATA = "UnicodeData%s.txt"
COMPOSITION_EXCLUSIONS = "CompositionExclusions%s.txt"
EASTASIAN_WIDTH = "EastAsianWidth%s.txt"
DERIVED_CORE_PROPERTIES = "DerivedCoreProperties%s.txt"
old_versions = ["3.2.0"]
CATEGORY_NAMES = [ "Cn", "Lu", "Ll", "Lt", "Mn", "Mc", "Me", "Nd",
"Nl", "No", "Zs", "Zl", "Zp", "Cc", "Cf", "Cs", "Co", "Cn", "Lm",
"Lo", "Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po", "Sm", "Sc", "Sk",
"So" ]
BIDIRECTIONAL_NAMES = [ "", "L", "LRE", "LRO", "R", "AL", "RLE", "RLO",
"PDF", "EN", "ES", "ET", "AN", "CS", "NSM", "BN", "B", "S", "WS",
"ON" ]
EASTASIANWIDTH_NAMES = [ "F", "H", "W", "Na", "A", "N" ]
# note: should match definitions in Objects/unicodectype.c
ALPHA_MASK = 0x01
DECIMAL_MASK = 0x02
DIGIT_MASK = 0x04
LOWER_MASK = 0x08
LINEBREAK_MASK = 0x10
SPACE_MASK = 0x20
TITLE_MASK = 0x40
UPPER_MASK = 0x80
XID_START_MASK = 0x100
XID_CONTINUE_MASK = 0x200
def maketables(trace=0):
print("--- Reading", UNICODE_DATA % "", "...")
version = ""
unicode = UnicodeData(UNICODE_DATA % version,
COMPOSITION_EXCLUSIONS % version,
EASTASIAN_WIDTH % version,
DERIVED_CORE_PROPERTIES % version)
print(len(list(filter(None, unicode.table))), "characters")
for version in old_versions:
print("--- Reading", UNICODE_DATA % ("-"+version), "...")
old_unicode = UnicodeData(UNICODE_DATA % ("-"+version),
COMPOSITION_EXCLUSIONS % ("-"+version),
EASTASIAN_WIDTH % ("-"+version),
DERIVED_CORE_PROPERTIES % ("-"+version))
print(len(list(filter(None, old_unicode.table))), "characters")
merge_old_version(version, unicode, old_unicode)
makeunicodename(unicode, trace)
makeunicodedata(unicode, trace)
makeunicodetype(unicode, trace)
# --------------------------------------------------------------------
# unicode character properties
def makeunicodedata(unicode, trace):
dummy = (0, 0, 0, 0, 0)
table = [dummy]
cache = {0: dummy}
index = [0] * len(unicode.chars)
FILE = "Modules/unicodedata_db.h"
print("--- Preparing", FILE, "...")
# 1) database properties
for char in unicode.chars:
record = unicode.table[char]
if record:
# extract database properties
category = CATEGORY_NAMES.index(record[2])
combining = int(record[3])
bidirectional = BIDIRECTIONAL_NAMES.index(record[4])
mirrored = record[9] == "Y"
eastasianwidth = EASTASIANWIDTH_NAMES.index(record[15])
item = (
category, combining, bidirectional, mirrored, eastasianwidth
)
# add entry to index and item tables
i = cache.get(item)
if i is None:
cache[item] = i = len(table)
table.append(item)
index[char] = i
# 2) decomposition data
    decomp_data = [0]
    decomp_data_cache = {}
decomp_prefix = [""]
decomp_index = [0] * len(unicode.chars)
decomp_size = 0
comp_pairs = []
comp_first = [None] * len(unicode.chars)
comp_last = [None] * len(unicode.chars)
for char in unicode.chars:
record = unicode.table[char]
if record:
if record[5]:
decomp = record[5].split()
if len(decomp) > 19:
raise Exception("character %x has a decomposition too large for nfd_nfkd" % char)
# prefix
if decomp[0][0] == "<":
prefix = decomp.pop(0)
else:
prefix = ""
try:
i = decomp_prefix.index(prefix)
except ValueError:
i = len(decomp_prefix)
decomp_prefix.append(prefix)
prefix = i
assert prefix < 256
# content
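                # the first entry packs the decomp_prefix index into
                # the low byte and the decomposition length into the
                # upper bits (hence the assert prefix < 256 above)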
                decomp = [prefix + (len(decomp)<<8)] + \
                         [int(s, 16) for s in decomp]
# Collect NFC pairs
if not prefix and len(decomp) == 3 and \
char not in unicode.exclusions and \
unicode.table[decomp[1]][3] == "0":
p, l, r = decomp
comp_first[l] = 1
comp_last[r] = 1
comp_pairs.append((l,r,char))
                # reuse an identical decomposition if one was already
                # stored; a plain list.index() cannot find a sub-list
                # in the flat array, so keep an explicit cache
                i = decomp_data_cache.get(tuple(decomp))
                if i is None:
                    i = len(decomp_data)
                    decomp_data_cache[tuple(decomp)] = i
                    decomp_data.extend(decomp)
                    decomp_size = decomp_size + len(decomp) * 2
else:
i = 0
decomp_index[char] = i
f = l = 0
comp_first_ranges = []
comp_last_ranges = []
prev_f = prev_l = None
for i in unicode.chars:
if comp_first[i] is not None:
comp_first[i] = f
f += 1
if prev_f is None:
prev_f = (i,i)
elif prev_f[1]+1 == i:
prev_f = prev_f[0],i
else:
comp_first_ranges.append(prev_f)
prev_f = (i,i)
if comp_last[i] is not None:
comp_last[i] = l
l += 1
if prev_l is None:
prev_l = (i,i)
elif prev_l[1]+1 == i:
prev_l = prev_l[0],i
else:
comp_last_ranges.append(prev_l)
prev_l = (i,i)
comp_first_ranges.append(prev_f)
comp_last_ranges.append(prev_l)
total_first = f
total_last = l
comp_data = [0]*(total_first*total_last)
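    # fill the dense (first, last) -> composed matrix, addressing it
    # with the reindexed row/column numbers assigned above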
for f,l,char in comp_pairs:
f = comp_first[f]
l = comp_last[l]
comp_data[f*total_last+l] = char
print(len(table), "unique properties")
print(len(decomp_prefix), "unique decomposition prefixes")
print(len(decomp_data), "unique decomposition entries:", end=' ')
print(decomp_size, "bytes")
print(total_first, "first characters in NFC")
print(total_last, "last characters in NFC")
print(len(comp_pairs), "NFC pairs")
print("--- Writing", FILE, "...")
fp = open(FILE, "w")
print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
print(file=fp)
print('#define UNIDATA_VERSION "%s"' % UNIDATA_VERSION, file=fp)
print("/* a list of unique database records */", file=fp)
print("const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {", file=fp)
for item in table:
print(" {%d, %d, %d, %d, %d}," % item, file=fp)
print("};", file=fp)
print(file=fp)
print("/* Reindexing of NFC first characters. */", file=fp)
print("#define TOTAL_FIRST",total_first, file=fp)
print("#define TOTAL_LAST",total_last, file=fp)
print("struct reindex{int start;short count,index;};", file=fp)
print("struct reindex nfc_first[] = {", file=fp)
for start,end in comp_first_ranges:
print(" { %d, %d, %d}," % (start,end-start,comp_first[start]), file=fp)
print(" {0,0,0}", file=fp)
print("};\n", file=fp)
print("struct reindex nfc_last[] = {", file=fp)
for start,end in comp_last_ranges:
print(" { %d, %d, %d}," % (start,end-start,comp_last[start]), file=fp)
print(" {0,0,0}", file=fp)
print("};\n", file=fp)
# FIXME: <fl> the following tables could be made static, and
# the support code moved into unicodedatabase.c
print("/* string literals */", file=fp)
print("const char *_PyUnicode_CategoryNames[] = {", file=fp)
for name in CATEGORY_NAMES:
print(" \"%s\"," % name, file=fp)
print(" NULL", file=fp)
print("};", file=fp)
print("const char *_PyUnicode_BidirectionalNames[] = {", file=fp)
for name in BIDIRECTIONAL_NAMES:
print(" \"%s\"," % name, file=fp)
print(" NULL", file=fp)
print("};", file=fp)
print("const char *_PyUnicode_EastAsianWidthNames[] = {", file=fp)
for name in EASTASIANWIDTH_NAMES:
print(" \"%s\"," % name, file=fp)
print(" NULL", file=fp)
print("};", file=fp)
print("static const char *decomp_prefix[] = {", file=fp)
for name in decomp_prefix:
print(" \"%s\"," % name, file=fp)
print(" NULL", file=fp)
print("};", file=fp)
# split record index table
index1, index2, shift = splitbins(index, trace)
print("/* index tables for the database records */", file=fp)
print("#define SHIFT", shift, file=fp)
Array("index1", index1).dump(fp, trace)
Array("index2", index2).dump(fp, trace)
# split decomposition index table
index1, index2, shift = splitbins(decomp_index, trace)
print("/* decomposition data */", file=fp)
Array("decomp_data", decomp_data).dump(fp, trace)
print("/* index tables for the decomposition data */", file=fp)
print("#define DECOMP_SHIFT", shift, file=fp)
Array("decomp_index1", index1).dump(fp, trace)
Array("decomp_index2", index2).dump(fp, trace)
index, index2, shift = splitbins(comp_data, trace)
print("/* NFC pairs */", file=fp)
print("#define COMP_SHIFT", shift, file=fp)
Array("comp_index", index).dump(fp, trace)
Array("comp_data", index2).dump(fp, trace)
# Generate delta tables for old versions
for version, table, normalization in unicode.changed:
cversion = version.replace(".","_")
records = [table[0]]
cache = {table[0]:0}
index = [0] * len(table)
for i, record in enumerate(table):
try:
index[i] = cache[record]
except KeyError:
index[i] = cache[record] = len(records)
records.append(record)
index1, index2, shift = splitbins(index, trace)
print("static const change_record change_records_%s[] = {" % cversion, file=fp)
for record in records:
print("\t{ %s }," % ", ".join(map(str,record)), file=fp)
print("};", file=fp)
Array("changes_%s_index" % cversion, index1).dump(fp, trace)
Array("changes_%s_data" % cversion, index2).dump(fp, trace)
print("static const change_record* get_change_%s(Py_UCS4 n)" % cversion, file=fp)
print("{", file=fp)
print("\tint index;", file=fp)
print("\tif (n >= 0x110000) index = 0;", file=fp)
print("\telse {", file=fp)
print("\t\tindex = changes_%s_index[n>>%d];" % (cversion, shift), file=fp)
print("\t\tindex = changes_%s_data[(index<<%d)+(n & %d)];" % \
(cversion, shift, ((1<<shift)-1)), file=fp)
print("\t}", file=fp)
print("\treturn change_records_%s+index;" % cversion, file=fp)
print("}\n", file=fp)
print("static Py_UCS4 normalization_%s(Py_UCS4 n)" % cversion, file=fp)
print("{", file=fp)
print("\tswitch(n) {", file=fp)
for k, v in normalization:
print("\tcase %s: return 0x%s;" % (hex(k), v), file=fp)
print("\tdefault: return 0;", file=fp)
print("\t}\n}\n", file=fp)
fp.close()
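
# Illustrative sketch (not used by the generator): composing an NFC
# pair with the in-memory tables built above, before they are split
# into the two-level C arrays.  The real consumer is the C code in
# Modules/unicodedata.c; the helper name here is hypothetical.
def _compose(l, r, comp_first, comp_last, comp_data, total_last):
    f = comp_first[l]        # reindexed row for the first character
    x = comp_last[r]         # reindexed column for the last character
    if f is None or x is None:
        return 0             # the pair does not compose
    return comp_data[f*total_last+x]
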
# --------------------------------------------------------------------
# unicode character type tables
def makeunicodetype(unicode, trace):
FILE = "Objects/unicodetype_db.h"
print("--- Preparing", FILE, "...")
# extract unicode types
dummy = (0, 0, 0, 0, 0, 0)
table = [dummy]
cache = {0: dummy}
index = [0] * len(unicode.chars)
for char in unicode.chars:
record = unicode.table[char]
if record:
# extract database properties
category = record[2]
bidirectional = record[4]
properties = record[16]
flags = 0
if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]:
flags |= ALPHA_MASK
if category == "Ll":
flags |= LOWER_MASK
if category == "Zl" or bidirectional == "B":
flags |= LINEBREAK_MASK
if category == "Zs" or bidirectional in ("WS", "B", "S"):
flags |= SPACE_MASK
if category == "Lt":
flags |= TITLE_MASK
if category == "Lu":
flags |= UPPER_MASK
if "XID_Start" in properties:
flags |= XID_START_MASK
if "XID_Continue" in properties:
flags |= XID_CONTINUE_MASK
# use delta predictor for upper/lower/title
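            # each mapping is stored as a signed 16-bit delta from the
            # character itself; "& 0xffff" wraps negative deltas into
            # the unsigned representation used in the table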
if record[12]:
upper = int(record[12], 16) - char
assert -32768 <= upper <= 32767
upper = upper & 0xffff
else:
upper = 0
if record[13]:
lower = int(record[13], 16) - char
assert -32768 <= lower <= 32767
lower = lower & 0xffff
else:
lower = 0
if record[14]:
title = int(record[14], 16) - char
                assert -32768 <= title <= 32767
title = title & 0xffff
else:
title = 0
# decimal digit, integer digit
decimal = 0
if record[6]:
flags |= DECIMAL_MASK
decimal = int(record[6])
digit = 0
if record[7]:
flags |= DIGIT_MASK
digit = int(record[7])
item = (
upper, lower, title, decimal, digit, flags
)
# add entry to index and item tables
i = cache.get(item)
if i is None:
cache[item] = i = len(table)
table.append(item)
index[char] = i
print(len(table), "unique character type entries")
print("--- Writing", FILE, "...")
fp = open(FILE, "w")
print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
print(file=fp)
print("/* a list of unique character type descriptors */", file=fp)
print("const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {", file=fp)
for item in table:
print(" {%d, %d, %d, %d, %d, %d}," % item, file=fp)
print("};", file=fp)
print(file=fp)
    # split the char type index table
index1, index2, shift = splitbins(index, trace)
print("/* type indexes */", file=fp)
print("#define SHIFT", shift, file=fp)
Array("index1", index1).dump(fp, trace)
Array("index2", index2).dump(fp, trace)
fp.close()
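
# Sketch only: undoing the delta predictor above at lookup time.  The
# generated C code does the equivalent with a cast to a signed type;
# here the "& 0xffff" wrap is reversed explicitly.
def _apply_case_delta(char, delta):
    if delta >= 0x8000:      # reinterpret as signed 16-bit
        delta -= 0x10000
    return char + delta
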
# --------------------------------------------------------------------
# unicode name database
def makeunicodename(unicode, trace):
FILE = "Modules/unicodename_db.h"
print("--- Preparing", FILE, "...")
# collect names
names = [None] * len(unicode.chars)
for char in unicode.chars:
record = unicode.table[char]
if record:
name = record[1].strip()
if name and name[0] != "<":
names[char] = name + chr(0)
print(len(list(filter(lambda n: n is not None, names))), "distinct names")
    # collect unique words from names (note that we distinguish
    # between words inside a name and words ending a name; the
    # latter include the trailing null byte)
words = {}
n = b = 0
for char in unicode.chars:
name = names[char]
if name:
w = name.split()
b = b + len(name)
n = n + len(w)
            for word in w:
                l = words.get(word)
                if l:
                    l.append(None)
                else:
                    words[word] = [len(words)]
print(n, "words in text;", b, "bytes")
wordlist = list(words.items())
# sort on falling frequency, then by name
    wordlist.sort(key=lambda item: (-len(item[1]), item[0]))
# figure out how many phrasebook escapes we need
escapes = 0
while escapes * 256 < len(wordlist):
escapes = escapes + 1
print(escapes, "escapes")
short = 256 - escapes
assert short > 0
print(short, "short indexes in lexicon")
# statistics
n = 0
for i in range(short):
n = n + len(wordlist[i][1])
print(n, "short indexes in phrasebook")
# pick the most commonly used words, and sort the rest on falling
# length (to maximize overlap)
wordlist, wordtail = wordlist[:short], wordlist[short:]
    wordtail.sort(key=lambda a: -len(a[0]))
wordlist.extend(wordtail)
# generate lexicon from words
lexicon_offset = [0]
lexicon = ""
words = {}
# build a lexicon string
offset = 0
for w, x in wordlist:
# encoding: bit 7 indicates last character in word (chr(128)
# indicates the last character in an entire string)
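        # e.g. "ABC" is stored as "AB" + chr(ord("C")+128)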
ww = w[:-1] + chr(ord(w[-1])+128)
# reuse string tails, when possible
o = lexicon.find(ww)
if o < 0:
o = offset
lexicon = lexicon + ww
offset = offset + len(w)
words[w] = len(lexicon_offset)
lexicon_offset.append(o)
lexicon = list(map(ord, lexicon))
# generate phrasebook from names and lexicon
phrasebook = [0]
phrasebook_offset = [0] * len(unicode.chars)
for char in unicode.chars:
name = names[char]
if name:
w = name.split()
phrasebook_offset[char] = len(phrasebook)
            for word in w:
                i = words[word]
if i < short:
phrasebook.append(i)
else:
# store as two bytes
phrasebook.append((i>>8) + short)
phrasebook.append(i&255)
assert getsize(phrasebook) == 1
#
# unicode name hash table
# extract names
data = []
for char in unicode.chars:
record = unicode.table[char]
if record:
name = record[1].strip()
if name and name[0] != "<":
data.append((name, char))
# the magic number 47 was chosen to minimize the number of
# collisions on the current data set. if you like, change it
# and see what happens...
codehash = Hash("code", data, 47)
print("--- Writing", FILE, "...")
fp = open(FILE, "w")
print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
print(file=fp)
print("#define NAME_MAXLEN", 256, file=fp)
print(file=fp)
print("/* lexicon */", file=fp)
Array("lexicon", lexicon).dump(fp, trace)
Array("lexicon_offset", lexicon_offset).dump(fp, trace)
    # split the phrasebook offset table
offset1, offset2, shift = splitbins(phrasebook_offset, trace)
print("/* code->name phrasebook */", file=fp)
print("#define phrasebook_shift", shift, file=fp)
print("#define phrasebook_short", short, file=fp)
Array("phrasebook", phrasebook).dump(fp, trace)
Array("phrasebook_offset1", offset1).dump(fp, trace)
Array("phrasebook_offset2", offset2).dump(fp, trace)
print("/* name->code dictionary */", file=fp)
codehash.dump(fp, trace)
fp.close()
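
# Sketch only (not used by the generator): decoding one character name
# from the structures built above.  Phrasebook entries below 'short'
# are one-byte lexicon indexes; larger indexes are stored as two bytes
# with the high byte offset by 'short'.  The last word of a name ends
# in chr(128), i.e. the trailing NUL with bit 7 set.  The real decoder
# is the C code in Modules/unicodedata.c; this helper is hypothetical.
def _decode_name(char, phrasebook, phrasebook_offset,
                 lexicon, lexicon_offset, short):
    pos = phrasebook_offset[char]
    if not pos:
        return None          # character has no name
    words = []
    while 1:
        i = phrasebook[pos]
        pos = pos + 1
        if i >= short:
            # two-byte escape: recombine high and low bytes
            i = ((i - short) << 8) + phrasebook[pos]
            pos = pos + 1
        # copy lexicon characters until bit 7 marks the end of a word
        o = lexicon_offset[i]
        word = ""
        while 1:
            c = lexicon[o]
            o = o + 1
            word = word + chr(c & 0x7f)
            if c & 0x80:
                break
        if word[-1] == chr(0):
            # chr(128) seen: this was the final word of the name
            words.append(word[:-1])
            return " ".join(words)
        words.append(word)
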
def merge_old_version(version, new, old):
# Changes to exclusion file not implemented yet
if old.exclusions != new.exclusions:
raise NotImplementedError("exclusions differ")
# In these change records, 0xFF means "no change"
bidir_changes = [0xFF]*0x110000
category_changes = [0xFF]*0x110000
decimal_changes = [0xFF]*0x110000
# In numeric data, 0 means "no change",
    # -1 means "did not have a numeric value"
numeric_changes = [0] * 0x110000
# normalization_changes is a list of key-value pairs
normalization_changes = []
for i in range(0x110000):
if new.table[i] is None:
# Characters unassigned in the new version ought to
# be unassigned in the old one
assert old.table[i] is None
continue
# check characters unassigned in the old version
if old.table[i] is None:
# category 0 is "unassigned"
category_changes[i] = 0
continue
# check characters that differ
if old.table[i] != new.table[i]:
for k in range(len(old.table[i])):
if old.table[i][k] != new.table[i][k]:
value = old.table[i][k]
if k == 2:
#print "CATEGORY",hex(i), old.table[i][k], new.table[i][k]
category_changes[i] = CATEGORY_NAMES.index(value)
elif k == 4:
#print "BIDIR",hex(i), old.table[i][k], new.table[i][k]
bidir_changes[i] = BIDIRECTIONAL_NAMES.index(value)
elif k == 5:
#print "DECOMP",hex(i), old.table[i][k], new.table[i][k]
# We assume that all normalization changes are in 1:1 mappings
assert " " not in value
normalization_changes.append((i, value))
elif k == 6:
#print "DECIMAL",hex(i), old.table[i][k], new.table[i][k]
# we only support changes where the old value is a single digit
assert value in "0123456789"
decimal_changes[i] = int(value)
elif k == 8:
# print "NUMERIC",hex(i), `old.table[i][k]`, new.table[i][k]
                        # Since 0 encodes "no change", the old value must not be 0
assert value != "0" and value != "-1"
if not value:
numeric_changes[i] = -1
else:
assert re.match("^[0-9]+$", value)
numeric_changes[i] = int(value)
elif k == 11:
# change to ISO comment, ignore
pass
elif k == 12:
# change to simple uppercase mapping; ignore
pass
elif k == 13:
# change to simple lowercase mapping; ignore
pass
elif k == 14:
# change to simple titlecase mapping; ignore
pass
elif k == 16:
# derived property changes; not yet
pass
else:
class Difference(Exception):pass
raise Difference(hex(i), k, old.table[i], new.table[i])
new.changed.append((version, list(zip(bidir_changes, category_changes,
decimal_changes, numeric_changes)),
normalization_changes))
# --------------------------------------------------------------------
# the following support code is taken from the unidb utilities
# Copyright (c) 1999-2000 by Secret Labs AB
# load a unicode-data file from disk
class UnicodeData:
# Record structure:
# [ID, name, category, combining, bidi, decomp, (6)
# decimal, digit, numeric, bidi-mirrored, Unicode-1-name, (11)
# ISO-comment, uppercase, lowercase, titlecase, ea-width, (16)
# derived-props] (17)
def __init__(self, filename, exclusions, eastasianwidth,
derivedprops, expand=1):
self.changed = []
file = open(filename)
table = [None] * 0x110000
while 1:
s = file.readline()
if not s:
break
s = s.strip().split(";")
char = int(s[0], 16)
table[char] = s
# expand first-last ranges
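        # (UnicodeData.txt encodes large blocks such as the CJK
        # ideographs as single <..., First>/<..., Last> pairs; copy
        # the First record to every character in the range)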
if expand:
field = None
for i in range(0, 0x110000):
s = table[i]
if s:
if s[1][-6:] == "First>":
s[1] = ""
field = s
elif s[1][-5:] == "Last>":
s[1] = ""
field = None
elif field:
f2 = field[:]
f2[0] = "%X" % i
table[i] = f2
# public attributes
self.filename = filename
self.table = table
        self.chars = range(0x110000) # all code points
file = open(exclusions)
self.exclusions = {}
for s in file:
s = s.strip()
if not s:
continue
if s[0] == '#':
continue
char = int(s.split()[0],16)
self.exclusions[char] = 1
widths = [None] * 0x110000
for s in open(eastasianwidth):
s = s.strip()
if not s:
continue
if s[0] == '#':
continue
s = s.split()[0].split(';')
if '..' in s[0]:
first, last = [int(c, 16) for c in s[0].split('..')]
chars = range(first, last+1)
else:
chars = [int(s[0], 16)]
for char in chars:
widths[char] = s[1]
for i in range(0, 0x110000):
if table[i] is not None:
table[i].append(widths[i])
for i in range(0, 0x110000):
if table[i] is not None:
table[i].append(set())
for s in open(derivedprops):
s = s.split('#', 1)[0].strip()
if not s:
continue
r, p = s.split(";")
r = r.strip()
p = p.strip()
if ".." in r:
first, last = [int(c, 16) for c in r.split('..')]
chars = range(first, last+1)
else:
chars = [int(r, 16)]
for char in chars:
if table[char]:
# Some properties (e.g. Default_Ignorable_Code_Point)
# apply to unassigned code points; ignore them
table[char][-1].add(p)
def uselatin1(self):
# restrict character range to ISO Latin 1
self.chars = range(256)
# hash table tools
# this is a straight-forward reimplementation of Python's built-in
# dictionary type, using a static data structure, and a custom string
# hash algorithm.
def myhash(s, magic):
h = 0
for c in map(ord, s.upper()):
h = (h * magic) + c
ix = h & 0xff000000
if ix:
h = (h ^ ((ix>>24) & 0xff)) & 0x00ffffff
return h
SIZES = [
(4,3), (8,3), (16,3), (32,5), (64,3), (128,3), (256,29), (512,17),
(1024,9), (2048,5), (4096,83), (8192,27), (16384,43), (32768,3),
(65536,45), (131072,9), (262144,39), (524288,39), (1048576,9),
(2097152,5), (4194304,3), (8388608,33), (16777216,27)
]
class Hash:
def __init__(self, name, data, magic):
# turn a (key, value) list into a static hash table structure
# determine table size
for size, poly in SIZES:
if size > len(data):
poly = size + poly
break
else:
            raise AssertionError("ran out of polynomials")
print(size, "slots in hash table")
table = [None] * size
mask = size-1
n = 0
hash = myhash
# initialize hash table
for key, value in data:
h = hash(key, magic)
i = (~h) & mask
v = table[i]
if v is None:
table[i] = value
continue
            incr = (h ^ (h >> 3)) & mask
if not incr:
incr = mask
while 1:
n = n + 1
i = (i + incr) & mask
v = table[i]
if v is None:
table[i] = value
break
incr = incr << 1
if incr > mask:
incr = incr ^ poly
print(n, "collisions")
self.collisions = n
for i in range(len(table)):
if table[i] is None:
table[i] = 0
self.data = Array(name + "_hash", table)
self.magic = magic
self.name = name
self.size = size
self.poly = poly
def dump(self, file, trace):
# write data to file, as a C array
self.data.dump(file, trace)
file.write("#define %s_magic %d\n" % (self.name, self.magic))
file.write("#define %s_size %d\n" % (self.name, self.size))
file.write("#define %s_poly %d\n" % (self.name, self.poly))
# stuff to deal with arrays of unsigned integers
class Array:
def __init__(self, name, data):
self.name = name
self.data = data
def dump(self, file, trace=0):
# write data to file, as a C array
size = getsize(self.data)
if trace:
print(self.name+":", size*len(self.data), "bytes", file=sys.stderr)
file.write("static ")
if size == 1:
file.write("unsigned char")
elif size == 2:
file.write("unsigned short")
else:
file.write("unsigned int")
file.write(" " + self.name + "[] = {\n")
if self.data:
s = " "
for item in self.data:
i = str(item) + ", "
if len(s) + len(i) > 78:
file.write(s + "\n")
s = " " + i
else:
s = s + i
if s.strip():
file.write(s + "\n")
file.write("};\n\n")
def getsize(data):
# return smallest possible integer size for the given array
maxdata = max(data)
if maxdata < 256:
return 1
elif maxdata < 65536:
return 2
else:
return 4
def splitbins(t, trace=0):
"""t, trace=0 -> (t1, t2, shift). Split a table to save space.
t is a sequence of ints. This function can be useful to save space if
many of the ints are the same. t1 and t2 are lists of ints, and shift
is an int, chosen to minimize the combined size of t1 and t2 (in C
code), and where for each i in range(len(t)),
t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
where mask is a bitmask isolating the last "shift" bits.
If optional arg trace is non-zero (default zero), progress info
is printed to sys.stderr. The higher the value, the more info
you'll get.
"""
if trace:
def dump(t1, t2, shift, bytes):
print("%d+%d bins at shift %d; %d bytes" % (
len(t1), len(t2), shift, bytes), file=sys.stderr)
print("Size of original table:", len(t)*getsize(t), \
"bytes", file=sys.stderr)
n = len(t)-1 # last valid index
maxshift = 0 # the most we can shift n and still have something left
if n > 0:
while n >> 1:
n >>= 1
maxshift += 1
del n
    bytes = sys.maxsize # smallest total size so far
t = tuple(t) # so slices can be dict keys
for shift in range(maxshift + 1):
t1 = []
t2 = []
size = 2**shift
bincache = {}
for i in range(0, len(t), size):
bin = t[i:i+size]
index = bincache.get(bin)
if index is None:
index = len(t2)
bincache[bin] = index
t2.extend(bin)
t1.append(index >> shift)
# determine memory size
b = len(t1)*getsize(t1) + len(t2)*getsize(t2)
if trace > 1:
dump(t1, t2, shift, b)
if b < bytes:
best = t1, t2, shift
bytes = b
t1, t2, shift = best
if trace:
print("Best:", end=' ', file=sys.stderr)
dump(t1, t2, shift, bytes)
if __debug__:
# exhaustively verify that the decomposition is correct
mask = ~((~0) << shift) # i.e., low-bit mask of shift bits
for i in range(len(t)):
assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
return best
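
# Sketch only: consulting a pair of tables produced by splitbins; this
# is the Python form of the lookup identity quoted in the docstring,
# and of the lookup the generated C code performs.
def _splitbins_lookup(t1, t2, shift, i):
    mask = (1 << shift) - 1      # low-bit mask of 'shift' bits
    return t2[(t1[i >> shift] << shift) + (i & mask)]
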
if __name__ == "__main__":
maketables(1)