author    Thomas Wouters <thomas@python.org>    2006-04-21 10:40:58 (GMT)
committer Thomas Wouters <thomas@python.org>    2006-04-21 10:40:58 (GMT)
commit    49fd7fa4431da299196d74087df4a04f99f9c46f (patch)
tree      35ace5fe78d3d52c7a9ab356ab9f6dbf8d4b71f4 /Tools/unicode
parent    9ada3d6e29d5165dadacbe6be07bcd35cfbef59d (diff)
Merge p3yk branch with the trunk up to revision 45595. This breaks a fair
number of tests, all because of the codecs/_multibytecodecs issue described
here (it's not a Py3K issue, just something Py3K discovers):

http://mail.python.org/pipermail/python-dev/2006-April/064051.html

Hye-Shik Chang promised to look for a fix, so no need to fix it here.
The tests that are expected to break are:

    test_codecencodings_cn
    test_codecencodings_hk
    test_codecencodings_jp
    test_codecencodings_kr
    test_codecencodings_tw
    test_codecs
    test_multibytecodec

This merge fixes an actual test failure (test_weakref) in this branch,
though, so I believe merging is the right thing to do anyway.
Diffstat (limited to 'Tools/unicode')
-rw-r--r--  Tools/unicode/Makefile           5
-rw-r--r--  Tools/unicode/gencjkcodecs.py   68
-rw-r--r--  Tools/unicode/gencodec.py        2
3 files changed, 73 insertions, 2 deletions
diff --git a/Tools/unicode/Makefile b/Tools/unicode/Makefile
index f266d4d..fbd3557 100644
--- a/Tools/unicode/Makefile
+++ b/Tools/unicode/Makefile
@@ -15,7 +15,7 @@ RM = /bin/rm
all: distclean mappings codecs
-codecs: misc windows iso apple ebcdic custom-mappings
+codecs: misc windows iso apple ebcdic custom-mappings cjk
### Mappings
@@ -72,6 +72,9 @@ ebcdic: build/
$(PYTHON) gencodec.py MAPPINGS/VENDORS/MICSFT/EBCDIC/ build/
$(RM) -f build/readme.*
+cjk: build/
+ $(PYTHON) gencjkcodecs.py build/
+
### Cleanup
clean:
diff --git a/Tools/unicode/gencjkcodecs.py b/Tools/unicode/gencjkcodecs.py
new file mode 100644
index 0000000..975c19c
--- /dev/null
+++ b/Tools/unicode/gencjkcodecs.py
@@ -0,0 +1,68 @@
+import os, string
+
+codecs = {
+ 'cn': ('gb2312', 'gbk', 'gb18030', 'hz'),
+ 'tw': ('big5', 'cp950'),
+ 'hk': ('big5hkscs',),
+ 'jp': ('cp932', 'shift_jis', 'euc_jp', 'euc_jisx0213', 'shift_jisx0213',
+ 'euc_jis_2004', 'shift_jis_2004'),
+ 'kr': ('cp949', 'euc_kr', 'johab'),
+ 'iso2022': ('iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2',
+ 'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext',
+ 'iso2022_kr'),
+}
+
+TEMPLATE = string.Template("""\
+#
+# $encoding.py: Python Unicode Codec for $ENCODING
+#
+# Written by Hye-Shik Chang <perky@FreeBSD.org>
+#
+
+import _codecs_$owner, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_$owner.getcodec('$encoding')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='$encoding',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+""")
+
+def gencodecs(prefix):
+ for loc, encodings in codecs.iteritems():
+ for enc in encodings:
+ code = TEMPLATE.substitute(ENCODING=enc.upper(),
+ encoding=enc.lower(),
+ owner=loc)
+ codecpath = os.path.join(prefix, enc + '.py')
+ open(codecpath, 'w').write(code)
+
+if __name__ == '__main__':
+ import sys
+ gencodecs(sys.argv[1])
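(A minimal usage sketch, not part of the commit: it assumes gencjkcodecs.py as added above is importable and is run with a Python 2.x interpreter, since gencodecs() relies on dict.iteritems(). The Makefile's cjk target does the equivalent via "gencjkcodecs.py build/".)

# Hypothetical usage sketch: generate the codec stubs into a scratch
# directory and list what was produced.
import os, tempfile
import gencjkcodecs

target = tempfile.mkdtemp()
gencjkcodecs.gencodecs(target)        # writes one <encoding>.py per configured codec
print(sorted(os.listdir(target)))     # e.g. ['big5.py', 'big5hkscs.py', ...]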
diff --git a/Tools/unicode/gencodec.py b/Tools/unicode/gencodec.py
index bb1c9da..3cfef20 100644
--- a/Tools/unicode/gencodec.py
+++ b/Tools/unicode/gencodec.py
@@ -348,7 +348,7 @@ def getregentry():
l.extend(encoding_map_code)
# Final new-line
- l.append('\n')
+ l.append('')
return '\n'.join(l).expandtabs()
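(The gencodec.py change only affects the trailing newline of the generated source: '\n'.join() already inserts a separator before the final element, so appending '' ends the file with a single newline where appending '\n' left an extra blank line. An illustrative snippet, not part of the commit:)

# Illustrative only: effect of appending '' vs. '\n' before the final join.
lines = ['import codecs', 'codec = ...']

old = '\n'.join(lines + ['\n'])   # 'import codecs\ncodec = ...\n\n' -- blank line at EOF
new = '\n'.join(lines + [''])     # 'import codecs\ncodec = ...\n'   -- single newline

assert old.endswith('\n\n')
assert new.endswith('\n') and not new.endswith('\n\n')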