'\"
'\" Copyright (c) 1997-1998 Sun Microsystems, Inc.
'\"
'\" See the file "license.terms" for information on usage and redistribution
'\" of this file, and for a DISCLAIMER OF ALL WARRANTIES.
'\" 
'\" RCS: @(#) $Id: Encoding.3,v 1.7 1999/10/13 00:32:05 hobbs Exp $
'\" 
.so man.macros
.TH Tcl_GetEncoding 3 "8.1" Tcl "Tcl Library Procedures"
.BS
.SH NAME
Tcl_GetEncoding, Tcl_FreeEncoding, Tcl_ExternalToUtfDString, Tcl_ExternalToUtf, Tcl_UtfToExternalDString, Tcl_UtfToExternal, Tcl_WinTCharToUtf, Tcl_WinUtfToTChar, Tcl_GetEncodingName, Tcl_SetSystemEncoding, Tcl_GetEncodingNames, Tcl_CreateEncoding, Tcl_GetDefaultEncodingDir, Tcl_SetDefaultEncodingDir \- procedures for creating and using encodings.
.SH SYNOPSIS
.nf
\fB#include <tcl.h>\fR
.sp
Tcl_Encoding
\fBTcl_GetEncoding\fR(\fIinterp, name\fR)
.sp
void
\fBTcl_FreeEncoding\fR(\fIencoding\fR)
.sp
char *
\fBTcl_ExternalToUtfDString\fR(\fIencoding, src, srcLen, dstPtr\fR)
.sp
int
\fBTcl_ExternalToUtf\fR(\fIinterp, encoding, src, srcLen, flags, statePtr, dst, dstLen, srcReadPtr, dstWrotePtr, 
	dstCharsPtr\fR)
.sp
char * 
\fBTcl_UtfToExternalDString\fR(\fIencoding, src, srcLen, dstPtr\fR)
.sp
int
\fBTcl_UtfToExternal\fR(\fIinterp, encoding, src, srcLen, flags, statePtr, dst, dstLen, srcReadPtr, dstWrotePtr, 
	dstCharsPtr\fR)
.sp
char *
\fBTcl_WinTCharToUtf\fR(\fItsrc, srcLen, dstPtr\fR)
.sp
TCHAR *
\fBTcl_WinUtfToTChar\fR(\fIsrc, srcLen, dstPtr\fR)
.sp
char *
\fBTcl_GetEncodingName\fR(\fIencoding\fR)
.sp
int
\fBTcl_SetSystemEncoding\fR(\fIinterp, name\fR)
.sp
void
\fBTcl_GetEncodingNames\fR(\fIinterp\fR)
.sp
Tcl_Encoding
\fBTcl_CreateEncoding\fR(\fItypePtr\fR)
.sp
char *
\fBTcl_GetDefaultEncodingDir\fR(\fIvoid\fR)
.sp
void
\fBTcl_SetDefaultEncodingDir\fR(\fIpath\fR)


.SH ARGUMENTS
.AS Tcl_EncodingState *dstWrotePtr
.AP Tcl_Interp *interp in
Interpreter to use for error reporting, or NULL if no error reporting is
desired.
.AP "CONST char" *name in
Name of encoding to load.
.AP Tcl_Encoding encoding in
The encoding to query, free, or use for converting text.  If \fIencoding\fR is 
NULL, the current system encoding is used.
.AP "CONST char" *src in
For the \fBTcl_ExternalToUtf\fR functions, an array of bytes in the
specified encoding that are to be converted to UTF-8.  For the
\fBTcl_UtfToExternal\fR and \fBTcl_WinUtfToTChar\fR functions, an array of
UTF-8 characters to be converted to the specified encoding.  
.AP "CONST TCHAR" *tsrc in
An array of Windows TCHAR characters to convert to UTF-8.
.AP int srcLen in 
Length of \fIsrc\fR or \fItsrc\fR in bytes.  If the length is negative, the 
encoding-specific length of the string is used.
.AP Tcl_DString *dstPtr out
Pointer to an uninitialized or free \fBTcl_DString\fR in which the converted
result will be stored.
.AP int flags in
Various flag bits OR-ed together.  
TCL_ENCODING_START signifies that the
source buffer is the first block in a (potentially multi-block) input
stream, telling the conversion routine to reset to an initial state and
perform any initialization that needs to occur before the first byte is
converted.  TCL_ENCODING_END signifies that the source buffer is the last
block in a (potentially multi-block) input stream, telling the conversion
routine to perform any finalization that needs to occur after the last
byte is converted and then to reset to an initial state.
TCL_ENCODING_STOPONERROR signifies that the conversion routine should
return immediately upon reading a source character that doesn't exist in
the target encoding; otherwise a default fallback character will
automatically be substituted.  
.AP Tcl_EncodingState *statePtr in/out
Used when converting a (generally long or indefinite length) byte stream
in a piece by piece fashion.  The conversion routine stores its current
state in \fI*statePtr\fR after \fIsrc\fR (the buffer containing the
current piece) has been converted; that state information must be passed
back when converting the next piece of the stream so the conversion
routine knows what state it was in when it left off at the end of the
last piece.  May be NULL, in which case the value specified for \fIflags\fR 
is ignored and the source buffer is assumed to contain the complete string to
convert.
.AP char *dst out
Buffer in which the converted result will be stored.  No more than
\fIdstLen\fR bytes will be stored in \fIdst\fR.
.AP int dstLen in
The maximum length of the output buffer \fIdst\fR in bytes.
.AP int *srcReadPtr out
Filled with the number of bytes from \fIsrc\fR that were actually
converted.  This may be less than the original source length if there was
a problem converting some source characters.  May be NULL.
.AP int *dstWrotePtr out
Filled with the number of bytes that were actually stored in the output
buffer as a result of the conversion.  May be NULL.
.AP int *dstCharsPtr out
Filled with the number of characters that correspond to the number of bytes
stored in the output buffer.  May be NULL.
.AP Tcl_EncodingType *typePtr in
Structure that defines a new type of encoding.  
.AP char *path in
A path to the location of the encoding file.  
.BE
.SH INTRODUCTION
.PP
These routines convert between Tcl's internal character representation,
UTF-8, and character representations used by various operating systems or
file systems, such as Unicode, ASCII, or Shift-JIS.  When operating on
strings, such as obtaining the names of files or displaying
characters using international fonts, the strings must be translated into
one or possibly multiple formats that the various system calls can use.  For
instance, on a Japanese Unix workstation, a user might obtain a filename
represented in the EUC-JP file encoding and then translate the characters to
the jisx0208 font encoding in order to display the filename in a Tk widget.
The purpose of the encoding package is to help bridge the translation gap.
UTF-8 provides an intermediate staging ground for all the various
encodings.  In the example above, text would be translated into UTF-8 from
whatever file encoding the operating system is using.  Then it would be
translated from UTF-8 into whatever font encoding the display routines
require.
.PP
Some basic encodings are compiled into Tcl.  Others can be defined by the
user or dynamically loaded from encoding files in a
platform-independent manner.
.SH DESCRIPTION
.PP
\fBTcl_GetEncoding\fR finds an encoding given its \fIname\fR.  The name may
refer to a builtin Tcl encoding, a user-defined encoding registered by
calling \fBTcl_CreateEncoding\fR, or a dynamically-loadable encoding
file.  The return value is a token that represents the encoding and can be
used in subsequent calls to procedures such as \fBTcl_GetEncodingName\fR,
\fBTcl_FreeEncoding\fR, and \fBTcl_UtfToExternal\fR.  If the name did not
refer to any known or loadable encoding, NULL is returned and an error
message is returned in \fIinterp\fR.
.PP
The encoding package maintains a database of all encodings currently in use.
The first time \fIname\fR is seen, \fBTcl_GetEncoding\fR returns an
encoding with a reference count of 1.  If the same \fIname\fR is requested
further times, then the reference count for that encoding is incremented
without the overhead of allocating a new encoding and all its associated
data structures.  
.PP
When an \fIencoding\fR is no longer needed, \fBTcl_FreeEncoding\fR
should be called to release it.  When an \fIencoding\fR is no longer in use
anywhere (i.e., it has been freed as many times as it has been gotten)
\fBTcl_FreeEncoding\fR will release all storage the encoding was using
and delete it from the database. 
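.PP
The following minimal sketch shows this lifecycle; the encoding name
\fBiso8859-1\fR and the surrounding error handling are illustrative
only:
.CS
Tcl_Encoding enc;

enc = Tcl_GetEncoding(interp, "iso8859-1");
if (enc == NULL) {
    return TCL_ERROR;    /* error message already left in interp */
}
/* ... use enc with the conversion procedures below ... */
Tcl_FreeEncoding(enc);   /* balances the Tcl_GetEncoding call */
.CE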
.PP
\fBTcl_ExternalToUtfDString\fR converts a source buffer \fIsrc\fR from the
specified \fIencoding\fR into UTF-8.  The converted bytes are stored in 
\fIdstPtr\fR, which is then NULL terminated.  The caller should eventually
call \fBTcl_DStringFree\fR to free any information stored in \fIdstPtr\fR.
When converting, if any of the characters in the source buffer cannot be
represented in the target encoding, a default fallback character will be
used.  The return value is a pointer to the value stored in the DString.
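.PP
For example, the following sketch converts a byte array to UTF-8;
\fIbuf\fR and \fInumBytes\fR are hypothetical:
.CS
Tcl_DString ds;
char *utf;

utf = Tcl_ExternalToUtfDString(enc, buf, numBytes, &ds);
/* ... use the NULL-terminated UTF-8 string in utf ... */
Tcl_DStringFree(&ds);
.CE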
.PP
\fBTcl_ExternalToUtf\fR converts a source buffer \fIsrc\fR from the specified
\fIencoding\fR into UTF-8.  Up to \fIsrcLen\fR bytes are converted from the
source buffer and up to \fIdstLen\fR converted bytes are stored in \fIdst\fR.
In all cases, \fI*srcReadPtr\fR is filled with the number of bytes that were
successfully converted from \fIsrc\fR and \fI*dstWrotePtr\fR is filled with
the corresponding number of bytes that were stored in \fIdst\fR.  The return
value is one of the following (a conversion-loop sketch follows this list):
.RS
.IP \fBTCL_OK\fR 29
All bytes of \fIsrc\fR were converted.
.IP \fBTCL_CONVERT_NOSPACE\fR 29
The destination buffer was not large enough to hold all of the converted
data; as many characters as would fit were converted.
.IP \fBTCL_CONVERT_MULTIBYTE\fR 29
The last few bytes in the source buffer were the beginning of a multibyte
sequence, but more bytes were needed to complete this sequence.  A
subsequent call to the conversion routine should pass a buffer containing
the unconverted bytes that remained in \fIsrc\fR plus some further bytes
from the source stream to properly convert the formerly split-up multibyte
sequence.  
.IP \fBTCL_CONVERT_SYNTAX\fR 29
The source buffer contained an invalid character sequence.  This may occur
if the input stream has been damaged or if the input encoding method was
misidentified.
.IP \fBTCL_CONVERT_UNKNOWN\fR 29
The source buffer contained a character that could not be represented in
the target encoding and TCL_ENCODING_STOPONERROR was specified.  
.RE
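.PP
The following sketch drains a conversion that may not fit in one output
buffer.  It is a sketch only: \fIenc\fR, \fIsrc\fR, and \fIsrcLen\fR
(holding the complete input) are assumed to be in scope, and
\fIconsume\fR is a hypothetical sink for the converted bytes:
.CS
Tcl_EncodingState state;
char dst[4096];
int result, srcRead, dstWrote;
int flags = TCL_ENCODING_START | TCL_ENCODING_END;

result = Tcl_ExternalToUtf(interp, enc, src, srcLen, flags,
        &state, dst, sizeof(dst), &srcRead, &dstWrote, NULL);
while (result == TCL_CONVERT_NOSPACE) {
    consume(dst, dstWrote);        /* hand off what was converted */
    flags &= ~TCL_ENCODING_START;  /* later blocks are not the first */
    src += srcRead;
    srcLen -= srcRead;
    result = Tcl_ExternalToUtf(interp, enc, src, srcLen, flags,
            &state, dst, sizeof(dst), &srcRead, &dstWrote, NULL);
}
consume(dst, dstWrote);
.CE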
.LP
\fBTcl_UtfToExternalDString\fR converts a source buffer \fIsrc\fR from UTF-8 
into the specified \fIencoding\fR.  The converted bytes are stored in
\fIdstPtr\fR, which is then terminated with the appropriate encoding-specific
NULL.  The caller should eventually call \fBTcl_DStringFree\fR to free any
information stored in \fIdstPtr\fR.  When converting, if any of the
characters in the source buffer cannot be represented in the target
encoding, a default fallback character will be used.  The return value is
a pointer to the value stored in the DString.
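.PP
A sketch of the reverse direction, where \fIutfString\fR is assumed to
be a NULL-terminated UTF-8 string (hence the \-1 length):
.CS
Tcl_DString ds;
char *native;

native = Tcl_UtfToExternalDString(enc, utfString, -1, &ds);
/* ... pass native to an interface expecting the encoding ... */
Tcl_DStringFree(&ds);
.CE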
.PP
\fBTcl_UtfToExternal\fR converts a source buffer \fIsrc\fR from UTF-8 into
the specified \fIencoding\fR.  Up to \fIsrcLen\fR bytes are converted from
the source buffer and up to \fIdstLen\fR converted bytes are stored in
\fIdst\fR.  In all cases, \fI*srcReadPtr\fR is filled with the number of
bytes that were successfully converted from \fIsrc\fR and \fI*dstWrotePtr\fR
is filled with the corresponding number of bytes that were stored in
\fIdst\fR.  The return values are the same as the return values for
\fBTcl_ExternalToUtf\fR.
.PP
\fBTcl_WinUtfToTChar\fR and \fBTcl_WinTCharToUtf\fR are
Windows-only convenience
functions for converting between UTF-8 and Windows strings.  On Windows 95
(as with the Macintosh and Unix operating systems),
all strings exchanged between Tcl and the operating system are "char"
based.  On Windows NT, some strings exchanged between Tcl and the
operating system are "char" oriented while others are in Unicode.  By
convention, in Windows a TCHAR is a character in the ANSI code page
on Windows 95 and a Unicode character on Windows NT.
.PP
If you planned to use the same "char" based interfaces on both Windows
95 and Windows NT, you could use \fBTcl_UtfToExternal\fR and
\fBTcl_ExternalToUtf\fR (or their \fBTcl_DString\fR equivalents) with an
encoding of NULL (the current system encoding).  On the other hand,
if you planned to use the Unicode interface when running on Windows NT
and the "char" interfaces when running on Windows 95, you would have
to perform the following type of test over and over in your program
(as represented in pseudo-code):
.CS
if (running NT) {
    encoding <- Tcl_GetEncoding("unicode");
    nativeBuffer <- Tcl_UtfToExternal(encoding, utfBuffer);
    Tcl_FreeEncoding(encoding);
} else {
    nativeBuffer <- Tcl_UtfToExternal(NULL, utfBuffer);
}
.CE
\fBTcl_WinUtfToTChar\fR and \fBTcl_WinTCharToUtf\fR automatically
handle this test and use the proper encoding based on the current
operating system.  \fBTcl_WinUtfToTChar\fR returns a pointer to
a TCHAR string, and \fBTcl_WinTCharToUtf\fR expects a TCHAR string
pointer as the \fIsrc\fR string.  Otherwise, these functions
behave identically to \fBTcl_UtfToExternalDString\fR and
\fBTcl_ExternalToUtfDString\fR.
.PP
\fBTcl_GetEncodingName\fR is roughly the inverse of \fBTcl_GetEncoding\fR.
Given an \fIencoding\fR, the return value is the \fIname\fR argument that
was used to create the encoding.  The string returned by 
\fBTcl_GetEncodingName\fR is only guaranteed to persist until the
\fIencoding\fR is deleted.  The caller must not modify this string.
.PP
\fBTcl_SetSystemEncoding\fR sets the default encoding that should be used
whenever the user passes a NULL value for the \fIencoding\fR argument to
any of the other encoding functions.  If \fIname\fR is NULL, the system
encoding is reset to the default system encoding, \fBbinary\fR.  If the
name did not refer to any known or loadable encoding, TCL_ERROR is
returned and an error message is left in \fIinterp\fR.  Otherwise, this
procedure increments the reference count of the new system encoding,
decrements the reference count of the old system encoding, and returns
TCL_OK.
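.PP
For example, a sketch that makes the built-in \fButf-8\fR encoding the
system encoding:
.CS
if (Tcl_SetSystemEncoding(interp, "utf-8") != TCL_OK) {
    return TCL_ERROR;    /* name was not a known encoding */
}
.CE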
.PP
\fBTcl_GetEncodingNames\fR sets the \fIinterp\fR result to a list
consisting of the names of all the encodings that are currently defined
or can be dynamically loaded, searching the encoding path specified by
\fBTcl_SetDefaultEncodingDir\fR.  This procedure does not ensure that the
dynamically-loadable encoding files contain valid data, but merely that they
exist.
.PP
\fBTcl_CreateEncoding\fR defines a new encoding and registers the C
procedures that are called back to convert between the encoding and
UTF-8.  Encodings created by \fBTcl_CreateEncoding\fR are thereafter
visible in the database used by \fBTcl_GetEncoding\fR.  Just as with the
\fBTcl_GetEncoding\fR procedure, the return value is a token that
represents the encoding and can be used in subsequent calls to other
encoding functions.  \fBTcl_CreateEncoding\fR returns an encoding with a
reference count of 1. If an encoding with the specified \fIname\fR
already exists, then its entry in the database is replaced with the new
encoding; the token for the old encoding will remain valid and continue
to behave as before, but users of the new token will now call the new
encoding procedures.  
.PP
The \fItypePtr\fR argument to \fBTcl_CreateEncoding\fR contains information 
about the name of the encoding and the procedures that will be called to
convert between this encoding and UTF-8.  It is defined as follows:
.PP
.CS
typedef struct Tcl_EncodingType {
	CONST char *\fIencodingName\fR;
	Tcl_EncodingConvertProc *\fItoUtfProc\fR;
	Tcl_EncodingConvertProc *\fIfromUtfProc\fR;
	Tcl_EncodingFreeProc *\fIfreeProc\fR;
	ClientData \fIclientData\fR;
	int \fInullSize\fR;
} Tcl_EncodingType;  
.CE
.PP
The \fIencodingName\fR provides a string name for the encoding, by
which it can be referred to in other procedures such as
\fBTcl_GetEncoding\fR.  The \fItoUtfProc\fR refers to a callback
procedure to invoke to convert text from this encoding into UTF-8.
The \fIfromUtfProc\fR refers to a callback procedure to invoke to
convert text from UTF-8 into this encoding.  The \fIfreeProc\fR refers
to a callback procedure to invoke when this encoding is deleted.  The
\fIfreeProc\fR field may be NULL.  The \fIclientData\fR contains an
arbitrary one-word value passed to \fItoUtfProc\fR, \fIfromUtfProc\fR,
and \fIfreeProc\fR whenever they are called.  Typically, this is a
pointer to a data structure containing encoding-specific information
that can be used by the callback procedures.  For instance, two very
similar encodings such as \fBascii\fR and \fBmacRoman\fR may use the
same callback procedure, but use different values of \fIclientData\fR
to control its behavior.  The \fInullSize\fR specifies the number of
zero bytes that signify end-of-string in this encoding.  It must be
\fB1\fR (for single-byte or multi-byte encodings like ASCII or
Shift-JIS) or \fB2\fR (for double-byte encodings like Unicode).
Constant-sized encodings with 3 or more bytes per character (such as
CNS11643) are not accepted.
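.PP
A sketch of registering a hypothetical single-byte encoding; the
procedures \fIMyToUtfProc\fR and \fIMyFromUtfProc\fR are assumed to
match \fBTcl_EncodingConvertProc\fR, described below:
.CS
Tcl_EncodingType type;
Tcl_Encoding enc;

type.encodingName = "myencoding";
type.toUtfProc    = MyToUtfProc;
type.fromUtfProc  = MyFromUtfProc;
type.freeProc     = NULL;    /* nothing to clean up */
type.clientData   = NULL;
type.nullSize     = 1;       /* single-byte encoding */
enc = Tcl_CreateEncoding(&type);
.CE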
.PP
The callback procedures \fItoUtfProc\fR and \fIfromUtfProc\fR should match the
type \fBTcl_EncodingConvertProc\fR:
.PP
.CS
typedef int Tcl_EncodingConvertProc(
	ClientData \fIclientData\fR,
	CONST char *\fIsrc\fR, 
	int \fIsrcLen\fR, 
	int \fIflags\fR, 
	Tcl_EncodingState *\fIstatePtr\fR,
	char *\fIdst\fR, 
	int \fIdstLen\fR, 
	int *\fIsrcReadPtr\fR,
	int *\fIdstWrotePtr\fR,
	int *\fIdstCharsPtr\fR);
.CE
.PP
The \fItoUtfProc\fR and \fIfromUtfProc\fR procedures are called by the
\fBTcl_ExternalToUtf\fR or \fBTcl_UtfToExternal\fR family of functions to
perform the actual conversion.  The \fIclientData\fR parameter to these
procedures is the same as the \fIclientData\fR field specified to
\fBTcl_CreateEncoding\fR when the encoding was created.  The remaining
arguments to the callback procedures are the same as the arguments,
documented at the top, to \fBTcl_ExternalToUtf\fR or
\fBTcl_UtfToExternal\fR, with the following exceptions.  If the
\fIsrcLen\fR argument to one of those high-level functions is negative,
the value passed to the callback procedure will be the appropriate
encoding-specific string length of \fIsrc\fR.  If any of the \fIsrcReadPtr\fR, 
\fIdstWrotePtr\fR, or \fIdstCharsPtr\fR arguments to one of the high-level
functions is NULL, the corresponding value passed to the callback
procedure will be a non-NULL location.
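.PP
A sketch of a \fItoUtfProc\fR for a hypothetical encoding whose
characters are plain ASCII, so each byte passes through unchanged;
a real conversion would replace the loop body:
.CS
static int
MyToUtfProc(
    ClientData clientData,      /* Not used by this sketch. */
    CONST char *src, int srcLen, int flags,
    Tcl_EncodingState *statePtr,
    char *dst, int dstLen,
    int *srcReadPtr, int *dstWrotePtr, int *dstCharsPtr)
{
    int read = 0, wrote = 0;

    while (read < srcLen && wrote < dstLen) {
        dst[wrote++] = src[read++];   /* identity mapping */
    }
    *srcReadPtr = read;               /* pointers are non-NULL here */
    *dstWrotePtr = wrote;
    *dstCharsPtr = wrote;             /* one byte per character */
    return (read < srcLen) ? TCL_CONVERT_NOSPACE : TCL_OK;
}
.CE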
.PP
The callback procedure \fIfreeProc\fR, if non-NULL, should match the type 
\fBTcl_EncodingFreeProc\fR:
.CS
typedef void Tcl_EncodingFreeProc(
	ClientData \fIclientData\fR);
.CE
.PP
This \fIfreeProc\fR function is called when the encoding is deleted.  The
\fIclientData\fR parameter is the same as the \fIclientData\fR field
specified to \fBTcl_CreateEncoding\fR when the encoding was created.  
.PP

\fBTcl_GetDefaultEncodingDir\fR and \fBTcl_SetDefaultEncodingDir\fR
access and set the directory to use when locating the default encoding
files.  If this value is not NULL, the \fBTclpInitLibraryPath\fR routine
prepends the path to the search path, and uses this path as
the first place to look when trying to locate the encoding file.

.SH "ENCODING FILES"
Space would prohibit precompiling into Tcl every possible encoding
algorithm, so many encodings are stored on disk as dynamically-loadable
encoding files.  This behavior also allows the user to create additional
encoding files that can be loaded using the same mechanism.  These
encoding files contain information about the tables and/or escape
sequences used to map between an external encoding and Unicode.  The
external encoding may consist of single-byte, multi-byte, or double-byte
characters.  
.PP
Each dynamically-loadable encoding is represented as a text file.  The
initial line of the file, beginning with a ``#'' symbol, is a comment
that provides a human-readable description of the file.  The next line
identifies the type of encoding file.  It can be one of the following
letters:
.IP "[1]   \fBS\fR"
A single-byte encoding, where one character is always one byte long in the
encoding.  An example is \fBiso8859-1\fR, used by many European languages.
.IP "[2]   \fBD\fR"
A double-byte encoding, where one character is always two bytes long in the
encoding.  An example is \fBbig5\fR, used for Chinese text.
.IP "[3]   \fBM\fR"
A multi-byte encoding, where one character may be either one or two bytes long.
Certain bytes are lead bytes, indicating that another byte must follow
and that together the two bytes represent one character.  Other bytes are not
lead bytes and represent themselves.  An example is \fBshiftjis\fR, used by
many Japanese computers.
.IP "[4]   \fBE\fR"
An escape-sequence encoding, specifying that certain sequences of bytes
do not represent characters, but commands that describe how following bytes
should be interpreted.  
.PP
The rest of the lines in the file depend on the type.  
.PP
Cases [1], [2], and [3] are collectively referred to as table-based encoding
files.  The lines in a table-based encoding file are in the same
format as this example taken from the \fBshiftjis\fR encoding (this is not
the complete file):
.CS
# Encoding file: shiftjis, multi-byte
M
003F 0 40
00
0000000100020003000400050006000700080009000A000B000C000D000E000F
0010001100120013001400150016001700180019001A001B001C001D001E001F
0020002100220023002400250026002700280029002A002B002C002D002E002F
0030003100320033003400350036003700380039003A003B003C003D003E003F
0040004100420043004400450046004700480049004A004B004C004D004E004F
0050005100520053005400550056005700580059005A005B005C005D005E005F
0060006100620063006400650066006700680069006A006B006C006D006E006F
0070007100720073007400750076007700780079007A007B007C007D203E007F
0080000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000FF61FF62FF63FF64FF65FF66FF67FF68FF69FF6AFF6BFF6CFF6DFF6EFF6F
FF70FF71FF72FF73FF74FF75FF76FF77FF78FF79FF7AFF7BFF7CFF7DFF7EFF7F
FF80FF81FF82FF83FF84FF85FF86FF87FF88FF89FF8AFF8BFF8CFF8DFF8EFF8F
FF90FF91FF92FF93FF94FF95FF96FF97FF98FF99FF9AFF9BFF9CFF9DFF9EFF9F
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
81
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000
300030013002FF0CFF0E30FBFF1AFF1BFF1FFF01309B309C00B4FF4000A8FF3E
FFE3FF3F30FD30FE309D309E30034EDD30053006300730FC20152010FF0F005C
301C2016FF5C2026202520182019201C201DFF08FF0930143015FF3BFF3DFF5B
FF5D30083009300A300B300C300D300E300F30103011FF0B221200B100D70000
00F7FF1D2260FF1CFF1E22662267221E22342642264000B0203220332103FFE5
FF0400A200A3FF05FF03FF06FF0AFF2000A72606260525CB25CF25CE25C725C6
25A125A025B325B225BD25BC203B301221922190219121933013000000000000
000000000000000000000000000000002208220B2286228722822283222A2229
000000000000000000000000000000002227222800AC21D221D4220022030000
0000000000000000000000000000000000000000222022A52312220222072261
2252226A226B221A223D221D2235222B222C0000000000000000000000000000
212B2030266F266D266A2020202100B6000000000000000025EF000000000000
.CE
.PP
The third line of the file contains three numbers.  The first number is the
fallback character (in base 16) to use when converting from UTF-8 to this
encoding.  The second number is a \fB1\fR if this file represents the
encoding for a symbol font, or \fB0\fR otherwise.  The last number (in base
10) is how many pages of data follow.  
.PP
Subsequent lines in the example above are pages that describe how to map
from the encoding into 2-byte Unicode.  The first line in a page identifies
the page number.  Following it are 256 double-byte numbers, arranged as 16
rows of 16 numbers.  Given a character in the encoding, the high byte of
that character is used to select which page, and the low byte of that
character is used as an index to select one of the double-byte numbers in
that page \- the value obtained being the corresponding Unicode character.
By examination of the example above, one can see that the characters 0x7E
and 0x8163 in \fBshiftjis\fR map to 203E and 2026 in Unicode, respectively.
.PP
Following the first page will be all the other pages, each in the same
format as the first: one number identifying the page followed by 256
double-byte Unicode characters.  If a character in the encoding maps to the
Unicode character 0000, it means that the character doesn't actually exist.
If all characters on a page would map to 0000, that page can be omitted.
.PP
Case [4] is the escape-sequence encoding file.  The lines in this type of
file are in the same format as this example taken from the \fBiso2022-jp\fR
encoding:
.CS
.ta 1.5i
# Encoding file: iso2022-jp, escape-driven
E
init		{}
final		{}
iso8859-1	\\x1b(B
jis0201		\\x1b(J
jis0208		\\x1b$@
jis0208		\\x1b$B
jis0212		\\x1b$(D
gb2312		\\x1b$A
ksc5601		\\x1b$(C
.CE
.PP
In the file, the first column represents an option and the second column
is the associated value.  \fBinit\fR is a string to emit or expect before
the first character is converted, while \fBfinal\fR is a string to emit
or expect after the last character.  All other options are names of
table-based encodings; the associated value is the escape-sequence that
marks that encoding.  Tcl syntax is used for the values; in the above
example, for instance, ``\fB{}\fR'' represents the empty string and
``\fB\\x1b\fR'' represents character 27.
.PP
When \fBTcl_GetEncoding\fR encounters an encoding \fIname\fR that has not
been loaded, it attempts to load an encoding file called \fIname\fB.enc\fR
from the \fBencoding\fR subdirectory of each directory specified in the
library path \fB$tcl_libPath\fR.  If the encoding file exists, but is
malformed, an error message will be left in \fIinterp\fR.
.SH KEYWORDS
utf, encoding, convert



class="hl kwa">yield from f()) def g3(): return (yield from f()) for gen_fun in (f, g, f2, g2, f3, g3): gen = gen_fun() self.assertEqual(next(gen), 1) with self.assertRaises(StopIteration) as cm: gen.send(2) self.assertEqual(cm.exception.value, 2) class ExceptionTest(unittest.TestCase): # Tests for the issue #23353: check that the currently handled exception # is correctly saved/restored in PyEval_EvalFrameEx(). def test_except_throw(self): def store_raise_exc_generator(): try: self.assertEqual(sys.exc_info()[0], None) yield except Exception as exc: # exception raised by gen.throw(exc) self.assertEqual(sys.exc_info()[0], ValueError) self.assertIsNone(exc.__context__) yield # ensure that the exception is not lost self.assertEqual(sys.exc_info()[0], ValueError) yield # we should be able to raise back the ValueError raise make = store_raise_exc_generator() next(make) try: raise ValueError() except Exception as exc: try: make.throw(exc) except Exception: pass next(make) with self.assertRaises(ValueError) as cm: next(make) self.assertIsNone(cm.exception.__context__) self.assertEqual(sys.exc_info(), (None, None, None)) def test_except_next(self): def gen(): self.assertEqual(sys.exc_info()[0], ValueError) yield "done" g = gen() try: raise ValueError except Exception: self.assertEqual(next(g), "done") self.assertEqual(sys.exc_info(), (None, None, None)) def test_except_gen_except(self): def gen(): try: self.assertEqual(sys.exc_info()[0], None) yield # we are called from "except ValueError:", TypeError must # inherit ValueError in its context raise TypeError() except TypeError as exc: self.assertEqual(sys.exc_info()[0], TypeError) self.assertEqual(type(exc.__context__), ValueError) # here we are still called from the "except ValueError:" self.assertEqual(sys.exc_info()[0], ValueError) yield self.assertIsNone(sys.exc_info()[0]) yield "done" g = gen() next(g) try: raise ValueError except Exception: next(g) self.assertEqual(next(g), "done") self.assertEqual(sys.exc_info(), (None, None, None)) def test_except_throw_exception_context(self): def gen(): try: try: self.assertEqual(sys.exc_info()[0], None) yield except ValueError: # we are called from "except ValueError:" self.assertEqual(sys.exc_info()[0], ValueError) raise TypeError() except Exception as exc: self.assertEqual(sys.exc_info()[0], TypeError) self.assertEqual(type(exc.__context__), ValueError) # we are still called from "except ValueError:" self.assertEqual(sys.exc_info()[0], ValueError) yield self.assertIsNone(sys.exc_info()[0]) yield "done" g = gen() next(g) try: raise ValueError except Exception as exc: g.throw(exc) self.assertEqual(next(g), "done") self.assertEqual(sys.exc_info(), (None, None, None)) tutorial_tests = """ Let's try a simple generator: >>> def f(): ... yield 1 ... yield 2 >>> for i in f(): ... print(i) 1 2 >>> g = f() >>> next(g) 1 >>> next(g) 2 "Falling off the end" stops the generator: >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 2, in g StopIteration "return" also stops the generator: >>> def f(): ... yield 1 ... return ... yield 2 # never reached ... >>> g = f() >>> next(g) 1 >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 3, in f StopIteration >>> next(g) # once stopped, can't be resumed Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration "raise StopIteration" stops the generator too: >>> def f(): ... yield 1 ... raise StopIteration ... yield 2 # never reached ... 
>>> g = f() >>> next(g) 1 >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration However, they are not exactly equivalent: >>> def g1(): ... try: ... return ... except: ... yield 1 ... >>> list(g1()) [] >>> def g2(): ... try: ... raise StopIteration ... except: ... yield 42 >>> print(list(g2())) [42] This may be surprising at first: >>> def g3(): ... try: ... return ... finally: ... yield 1 ... >>> list(g3()) [1] Let's create an alternate range() function implemented as a generator: >>> def yrange(n): ... for i in range(n): ... yield i ... >>> list(yrange(5)) [0, 1, 2, 3, 4] Generators always return to the most recent caller: >>> def creator(): ... r = yrange(5) ... print("creator", next(r)) ... return r ... >>> def caller(): ... r = creator() ... for i in r: ... print("caller", i) ... >>> caller() creator 0 caller 1 caller 2 caller 3 caller 4 Generators can call other generators: >>> def zrange(n): ... for i in yrange(n): ... yield i ... >>> list(zrange(5)) [0, 1, 2, 3, 4] """ # The examples from PEP 255. pep_tests = """ Specification: Yield Restriction: A generator cannot be resumed while it is actively running: >>> def g(): ... i = next(me) ... yield i >>> me = g() >>> next(me) Traceback (most recent call last): ... File "<string>", line 2, in g ValueError: generator already executing Specification: Return Note that return isn't always equivalent to raising StopIteration: the difference lies in how enclosing try/except constructs are treated. For example, >>> def f1(): ... try: ... return ... except: ... yield 1 >>> print(list(f1())) [] because, as in any function, return simply exits, but >>> def f2(): ... try: ... raise StopIteration ... except: ... yield 42 >>> print(list(f2())) [42] because StopIteration is captured by a bare "except", as is any exception. Specification: Generators and Exception Propagation >>> def f(): ... return 1//0 >>> def g(): ... yield f() # the zero division exception propagates ... yield 42 # and we'll never get here >>> k = g() >>> next(k) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 2, in g File "<stdin>", line 2, in f ZeroDivisionError: integer division or modulo by zero >>> next(k) # and the generator cannot be resumed Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration >>> Specification: Try/Except/Finally >>> def f(): ... try: ... yield 1 ... try: ... yield 2 ... 1//0 ... yield 3 # never get here ... except ZeroDivisionError: ... yield 4 ... yield 5 ... raise ... except: ... yield 6 ... yield 7 # the "raise" above stops this ... except: ... yield 8 ... yield 9 ... try: ... x = 12 ... finally: ... yield 10 ... yield 11 >>> print(list(f())) [1, 2, 4, 5, 8, 9, 10, 11] >>> Guido's binary tree example. >>> # A binary tree class. >>> class Tree: ... ... def __init__(self, label, left=None, right=None): ... self.label = label ... self.left = left ... self.right = right ... ... def __repr__(self, level=0, indent=" "): ... s = level*indent + repr(self.label) ... if self.left: ... s = s + "\\n" + self.left.__repr__(level+1, indent) ... if self.right: ... s = s + "\\n" + self.right.__repr__(level+1, indent) ... return s ... ... def __iter__(self): ... return inorder(self) >>> # Create a Tree from a list. >>> def tree(list): ... n = len(list) ... if n == 0: ... return [] ... i = n // 2 ... return Tree(list[i], tree(list[:i]), tree(list[i+1:])) >>> # Show it off: create a tree. 
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ") >>> # A recursive generator that generates Tree labels in in-order. >>> def inorder(t): ... if t: ... for x in inorder(t.left): ... yield x ... yield t.label ... for x in inorder(t.right): ... yield x >>> # Show it off: create a tree. >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ") >>> # Print the nodes of the tree in in-order. >>> for x in t: ... print(' '+x, end='') A B C D E F G H I J K L M N O P Q R S T U V W X Y Z >>> # A non-recursive generator. >>> def inorder(node): ... stack = [] ... while node: ... while node.left: ... stack.append(node) ... node = node.left ... yield node.label ... while not node.right: ... try: ... node = stack.pop() ... except IndexError: ... return ... yield node.label ... node = node.right >>> # Exercise the non-recursive generator. >>> for x in t: ... print(' '+x, end='') A B C D E F G H I J K L M N O P Q R S T U V W X Y Z """ # Examples from Iterator-List and Python-Dev and c.l.py. email_tests = """ The difference between yielding None and returning it. >>> def g(): ... for i in range(3): ... yield None ... yield None ... return >>> list(g()) [None, None, None, None] Ensure that explicitly raising StopIteration acts like any other exception in try/except, not like a return. >>> def g(): ... yield 1 ... try: ... raise StopIteration ... except: ... yield 2 ... yield 3 >>> list(g()) [1, 2, 3] Next one was posted to c.l.py. >>> def gcomb(x, k): ... "Generate all combinations of k elements from list x." ... ... if k > len(x): ... return ... if k == 0: ... yield [] ... else: ... first, rest = x[0], x[1:] ... # A combination does or doesn't contain first. ... # If it does, the remainder is a k-1 comb of rest. ... for c in gcomb(rest, k-1): ... c.insert(0, first) ... yield c ... # If it doesn't contain first, it's a k comb of rest. ... for c in gcomb(rest, k): ... yield c >>> seq = list(range(1, 5)) >>> for k in range(len(seq) + 2): ... print("%d-combs of %s:" % (k, seq)) ... for c in gcomb(seq, k): ... print(" ", c) 0-combs of [1, 2, 3, 4]: [] 1-combs of [1, 2, 3, 4]: [1] [2] [3] [4] 2-combs of [1, 2, 3, 4]: [1, 2] [1, 3] [1, 4] [2, 3] [2, 4] [3, 4] 3-combs of [1, 2, 3, 4]: [1, 2, 3] [1, 2, 4] [1, 3, 4] [2, 3, 4] 4-combs of [1, 2, 3, 4]: [1, 2, 3, 4] 5-combs of [1, 2, 3, 4]: From the Iterators list, about the types of these things. >>> def g(): ... yield 1 ... >>> type(g) <class 'function'> >>> i = g() >>> type(i) <class 'generator'> >>> [s for s in dir(i) if not s.startswith('_')] ['close', 'gi_code', 'gi_frame', 'gi_running', 'send', 'throw'] >>> from test.support import HAVE_DOCSTRINGS >>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'Implement next(self).') Implement next(self). >>> iter(i) is i True >>> import types >>> isinstance(i, types.GeneratorType) True And more, added later. >>> i.gi_running 0 >>> type(i.gi_frame) <class 'frame'> >>> i.gi_running = 42 Traceback (most recent call last): ... AttributeError: readonly attribute >>> def g(): ... yield me.gi_running >>> me = g() >>> me.gi_running 0 >>> next(me) 1 >>> me.gi_running 0 A clever union-find implementation from c.l.py, due to David Eppstein. Sent: Friday, June 29, 2001 12:16 PM To: python-list@python.org Subject: Re: PEP 255: Simple Generators >>> class disjointSet: ... def __init__(self, name): ... self.name = name ... self.parent = None ... self.generator = self.generate() ... ... def generate(self): ... while not self.parent: ... yield self ... for x in self.parent.generator: ... yield x ... ... def find(self): ... return next(self.generator) ... ... 
def union(self, parent): ... if self.parent: ... raise ValueError("Sorry, I'm not a root!") ... self.parent = parent ... ... def __str__(self): ... return self.name >>> names = "ABCDEFGHIJKLM" >>> sets = [disjointSet(name) for name in names] >>> roots = sets[:] >>> import random >>> gen = random.Random(42) >>> while 1: ... for s in sets: ... print(" %s->%s" % (s, s.find()), end='') ... print() ... if len(roots) > 1: ... s1 = gen.choice(roots) ... roots.remove(s1) ... s2 = gen.choice(roots) ... s1.union(s2) ... print("merged", s1, "into", s2) ... else: ... break A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M merged K into B A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M merged A into F A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M merged E into F A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M merged D into C A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M merged M into C A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C merged J into B A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C merged B into C A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C merged F into G A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C merged L into C A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C merged G into I A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C merged I into H A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C merged C into H A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H """ # Emacs turd ' # Fun tests (for sufficiently warped notions of "fun"). fun_tests = """ Build up to a recursive Sieve of Eratosthenes generator. >>> def firstn(g, n): ... return [next(g) for i in range(n)] >>> def intsfrom(i): ... while 1: ... yield i ... i += 1 >>> firstn(intsfrom(5), 7) [5, 6, 7, 8, 9, 10, 11] >>> def exclude_multiples(n, ints): ... for i in ints: ... if i % n: ... yield i >>> firstn(exclude_multiples(3, intsfrom(1)), 6) [1, 2, 4, 5, 7, 8] >>> def sieve(ints): ... prime = next(ints) ... yield prime ... not_divisible_by_prime = exclude_multiples(prime, ints) ... for p in sieve(not_divisible_by_prime): ... yield p >>> primes = sieve(intsfrom(2)) >>> firstn(primes, 20) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71] Another famous problem: generate all integers of the form 2**i * 3**j * 5**k in increasing order, where i,j,k >= 0. Trickier than it may look at first! Try writing it without generators, and correctly, and without generating 3 internal results for each result output. >>> def times(n, g): ... for i in g: ... yield n * i >>> firstn(times(10, intsfrom(1)), 10) [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] >>> def merge(g, h): ... ng = next(g) ... nh = next(h) ... while 1: ... if ng < nh: ... yield ng ... ng = next(g) ... elif ng > nh: ... yield nh ... nh = next(h) ... else: ... yield ng ... ng = next(g) ... nh = next(h) The following works, but is doing a whale of a lot of redundant work -- it's not clear how to get the internal uses of m235 to share a single generator. Note that me_times2 (etc) each need to see every element in the result sequence. So this is an example where lazy lists are more natural (you can look at the head of a lazy list any number of times). >>> def m235(): ... yield 1 ... me_times2 = times(2, m235()) ... me_times3 = times(3, m235()) ... me_times5 = times(5, m235()) ... for i in merge(merge(me_times2, ... me_times3), ... me_times5): ... 
yield i Don't print "too many" of these -- the implementation above is extremely inefficient: each call of m235() leads to 3 recursive calls, and in turn each of those 3 more, and so on, and so on, until we've descended enough levels to satisfy the print stmts. Very odd: when I printed 5 lines of results below, this managed to screw up Win98's malloc in "the usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting address space, and it *looked* like a very slow leak. >>> result = m235() >>> for i in range(3): ... print(firstn(result, 15)) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] Heh. Here's one way to get a shared list, complete with an excruciating namespace renaming trick. The *pretty* part is that the times() and merge() functions can be reused as-is, because they only assume their stream arguments are iterable -- a LazyList is the same as a generator to times(). >>> class LazyList: ... def __init__(self, g): ... self.sofar = [] ... self.fetch = g.__next__ ... ... def __getitem__(self, i): ... sofar, fetch = self.sofar, self.fetch ... while i >= len(sofar): ... sofar.append(fetch()) ... return sofar[i] >>> def m235(): ... yield 1 ... # Gack: m235 below actually refers to a LazyList. ... me_times2 = times(2, m235) ... me_times3 = times(3, m235) ... me_times5 = times(5, m235) ... for i in merge(merge(me_times2, ... me_times3), ... me_times5): ... yield i Print as many of these as you like -- *this* implementation is memory- efficient. >>> m235 = LazyList(m235()) >>> for i in range(5): ... print([m235[j] for j in range(15*i, 15*(i+1))]) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] [200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384] [400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675] Ye olde Fibonacci generator, LazyList style. >>> def fibgen(a, b): ... ... def sum(g, h): ... while 1: ... yield next(g) + next(h) ... ... def tail(g): ... next(g) # throw first away ... for x in g: ... yield x ... ... yield a ... yield b ... for s in sum(iter(fib), ... tail(iter(fib))): ... yield s >>> fib = LazyList(fibgen(1, 2)) >>> firstn(iter(fib), 17) [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584] Running after your tail with itertools.tee (new in version 2.4) The algorithms "m235" (Hamming) and Fibonacci presented above are both examples of a whole family of FP (functional programming) algorithms where a function produces and returns a list while the production algorithm suppose the list as already produced by recursively calling itself. For these algorithms to work, they must: - produce at least a first element without presupposing the existence of the rest of the list - produce their elements in a lazy manner To work efficiently, the beginning of the list must not be recomputed over and over again. This is ensured in most FP languages as a built-in feature. In python, we have to explicitly maintain a list of already computed results and abandon genuine recursivity. This is what had been attempted above with the LazyList class. One problem with that class is that it keeps a list of all of the generated results and therefore continually grows. This partially defeats the goal of the generator concept, viz. 
produce the results only as needed instead of producing them all and thereby wasting memory. Thanks to itertools.tee, it is now clear "how to get the internal uses of m235 to share a single generator". >>> from itertools import tee >>> def m235(): ... def _m235(): ... yield 1 ... for n in merge(times(2, m2), ... merge(times(3, m3), ... times(5, m5))): ... yield n ... m1 = _m235() ... m2, m3, m5, mRes = tee(m1, 4) ... return mRes >>> it = m235() >>> for i in range(5): ... print(firstn(it, 15)) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] [200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384] [400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675] The "tee" function does just what we want. It internally keeps a generated result for as long as it has not been "consumed" from all of the duplicated iterators, whereupon it is deleted. You can therefore print the hamming sequence during hours without increasing memory usage, or very little. The beauty of it is that recursive running-after-their-tail FP algorithms are quite straightforwardly expressed with this Python idiom. Ye olde Fibonacci generator, tee style. >>> def fib(): ... ... def _isum(g, h): ... while 1: ... yield next(g) + next(h) ... ... def _fib(): ... yield 1 ... yield 2 ... next(fibTail) # throw first away ... for res in _isum(fibHead, fibTail): ... yield res ... ... realfib = _fib() ... fibHead, fibTail, fibRes = tee(realfib, 3) ... return fibRes >>> firstn(fib(), 17) [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584] """ # syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0 # hackery. syntax_tests = """ These are fine: >>> def f(): ... yield 1 ... return >>> def f(): ... try: ... yield 1 ... finally: ... pass >>> def f(): ... try: ... try: ... 1//0 ... except ZeroDivisionError: ... yield 666 ... except: ... pass ... finally: ... pass >>> def f(): ... try: ... try: ... yield 12 ... 1//0 ... except ZeroDivisionError: ... yield 666 ... except: ... try: ... x = 12 ... finally: ... yield 12 ... except: ... return >>> list(f()) [12, 666] >>> def f(): ... yield >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... yield >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... yield 1 >>> type(f()) <class 'generator'> >>> def f(): ... if "": ... yield None >>> type(f()) <class 'generator'> >>> def f(): ... return ... try: ... if x==4: ... pass ... elif 0: ... try: ... 1//0 ... except SyntaxError: ... pass ... else: ... if 0: ... while 12: ... x += 1 ... yield 2 # don't blink ... f(a, b, c, d, e) ... else: ... pass ... except: ... x = 1 ... return >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... def g(): ... yield 1 ... >>> type(f()) <class 'NoneType'> >>> def f(): ... if 0: ... class C: ... def __init__(self): ... yield 1 ... def f(self): ... yield 2 >>> type(f()) <class 'NoneType'> >>> def f(): ... if 0: ... return ... if 0: ... yield 2 >>> type(f()) <class 'generator'> This one caused a crash (see SF bug 567538): >>> def f(): ... for i in range(3): ... try: ... continue ... finally: ... yield i ... >>> g = f() >>> print(next(g)) 0 >>> print(next(g)) 1 >>> print(next(g)) 2 >>> print(next(g)) Traceback (most recent call last): StopIteration Test the gi_code attribute >>> def f(): ... yield 5 ... 
>>> g = f() >>> g.gi_code is f.__code__ True >>> next(g) 5 >>> next(g) Traceback (most recent call last): StopIteration >>> g.gi_code is f.__code__ True Test the __name__ attribute and the repr() >>> def f(): ... yield 5 ... >>> g = f() >>> g.__name__ 'f' >>> repr(g) # doctest: +ELLIPSIS '<generator object f at ...>' Lambdas shouldn't have their usual return behavior. >>> x = lambda: (yield 1) >>> list(x()) [1] >>> x = lambda: ((yield 1), (yield 2)) >>> list(x()) [1, 2] """ # conjoin is a simple backtracking generator, named in honor of Icon's # "conjunction" control structure. Pass a list of no-argument functions # that return iterable objects. Easiest to explain by example: assume the # function list [x, y, z] is passed. Then conjoin acts like: # # def g(): # values = [None] * 3 # for values[0] in x(): # for values[1] in y(): # for values[2] in z(): # yield values # # So some 3-lists of values *may* be generated, each time we successfully # get into the innermost loop. If an iterator fails (is exhausted) before # then, it "backtracks" to get the next value from the nearest enclosing # iterator (the one "to the left"), and starts all over again at the next # slot (pumps a fresh iterator). Of course this is most useful when the # iterators have side-effects, so that which values *can* be generated at # each slot depend on the values iterated at previous slots. def simple_conjoin(gs): values = [None] * len(gs) def gen(i): if i >= len(gs): yield values else: for values[i] in gs[i](): for x in gen(i+1): yield x for x in gen(0): yield x # That works fine, but recursing a level and checking i against len(gs) for # each item produced is inefficient. By doing manual loop unrolling across # generator boundaries, it's possible to eliminate most of that overhead. # This isn't worth the bother *in general* for generators, but conjoin() is # a core building block for some CPU-intensive generator applications. def conjoin(gs): n = len(gs) values = [None] * n # Do one loop nest at time recursively, until the # of loop nests # remaining is divisible by 3. def gen(i): if i >= n: yield values elif (n-i) % 3: ip1 = i+1 for values[i] in gs[i](): for x in gen(ip1): yield x else: for x in _gen3(i): yield x # Do three loop nests at a time, recursing only if at least three more # remain. Don't call directly: this is an internal optimization for # gen's use. def _gen3(i): assert i < n and (n-i) % 3 == 0 ip1, ip2, ip3 = i+1, i+2, i+3 g, g1, g2 = gs[i : ip3] if ip3 >= n: # These are the last three, so we can yield values directly. for values[i] in g(): for values[ip1] in g1(): for values[ip2] in g2(): yield values else: # At least 6 loop nests remain; peel off 3 and recurse for the # rest. for values[i] in g(): for values[ip1] in g1(): for values[ip2] in g2(): for x in _gen3(ip3): yield x for x in gen(0): yield x # And one more approach: For backtracking apps like the Knight's Tour # solver below, the number of backtracking levels can be enormous (one # level per square, for the Knight's Tour, so that e.g. a 100x100 board # needs 10,000 levels). In such cases Python is likely to run out of # stack space due to recursion. So here's a recursion-free version of # conjoin too. # NOTE WELL: This allows large problems to be solved with only trivial # demands on stack space. Without explicitly resumable generators, this is # much harder to achieve. OTOH, this is much slower (up to a factor of 2) # than the fancy unrolled recursive conjoin. 
def flat_conjoin(gs): # rename to conjoin to run tests with this instead n = len(gs) values = [None] * n iters = [None] * n _StopIteration = StopIteration # make local because caught a *lot* i = 0 while 1: # Descend. try: while i < n: it = iters[i] = gs[i]().__next__ values[i] = it() i += 1 except _StopIteration: pass else: assert i == n yield values # Backtrack until an older iterator can be resumed. i -= 1 while i >= 0: try: values[i] = iters[i]() # Success! Start fresh at next level. i += 1 break except _StopIteration: # Continue backtracking. i -= 1 else: assert i < 0 break # A conjoin-based N-Queens solver. class Queens: def __init__(self, n): self.n = n rangen = range(n) # Assign a unique int to each column and diagonal. # columns: n of those, range(n). # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0- # based. # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along # each, smallest i+j is 0, largest is 2n-2. # For each square, compute a bit vector of the columns and # diagonals it covers, and for each row compute a function that # generates the possiblities for the columns in that row. self.rowgenerators = [] for i in rangen: rowuses = [(1 << j) | # column ordinal (1 << (n + i-j + n-1)) | # NW-SE ordinal (1 << (n + 2*n-1 + i+j)) # NE-SW ordinal for j in rangen] def rowgen(rowuses=rowuses): for j in rangen: uses = rowuses[j] if uses & self.used == 0: self.used |= uses yield j self.used &= ~uses self.rowgenerators.append(rowgen) # Generate solutions. def solve(self): self.used = 0 for row2col in conjoin(self.rowgenerators): yield row2col def printsolution(self, row2col): n = self.n assert n == len(row2col) sep = "+" + "-+" * n print(sep) for i in range(n): squares = [" " for j in range(n)] squares[row2col[i]] = "Q" print("|" + "|".join(squares) + "|") print(sep) # A conjoin-based Knight's Tour solver. This is pretty sophisticated # (e.g., when used with flat_conjoin above, and passing hard=1 to the # constructor, a 200x200 Knight's Tour was found quickly -- note that we're # creating 10s of thousands of generators then!), and is lengthy. class Knights: def __init__(self, m, n, hard=0): self.m, self.n = m, n # solve() will set up succs[i] to be a list of square #i's # successors. succs = self.succs = [] # Remove i0 from each of its successor's successor lists, i.e. # successors can't go back to i0 again. Return 0 if we can # detect this makes a solution impossible, else return 1. def remove_from_successors(i0, len=len): # If we remove all exits from a free square, we're dead: # even if we move to it next, we can't leave it again. # If we create a square with one exit, we must visit it next; # else somebody else will have to visit it, and since there's # only one adjacent, there won't be a way to leave it again. # Finelly, if we create more than one free square with a # single exit, we can only move to one of them next, leaving # the other one a dead end. ne0 = ne1 = 0 for i in succs[i0]: s = succs[i] s.remove(i0) e = len(s) if e == 0: ne0 += 1 elif e == 1: ne1 += 1 return ne0 == 0 and ne1 < 2 # Put i0 back in each of its successor's successor lists. def add_to_successors(i0): for i in succs[i0]: succs[i].append(i0) # Generate the first move. def first(): if m < 1 or n < 1: return # Since we're looking for a cycle, it doesn't matter where we # start. Starting in a corner makes the 2nd move easy. 
corner = self.coords2index(0, 0) remove_from_successors(corner) self.lastij = corner yield corner add_to_successors(corner) # Generate the second moves. def second(): corner = self.coords2index(0, 0) assert self.lastij == corner # i.e., we started in the corner if m < 3 or n < 3: return assert len(succs[corner]) == 2 assert self.coords2index(1, 2) in succs[corner] assert self.coords2index(2, 1) in succs[corner] # Only two choices. Whichever we pick, the other must be the # square picked on move m*n, as it's the only way to get back # to (0, 0). Save its index in self.final so that moves before # the last know it must be kept free. for i, j in (1, 2), (2, 1): this = self.coords2index(i, j) final = self.coords2index(3-i, 3-j) self.final = final remove_from_successors(this) succs[final].append(corner) self.lastij = this yield this succs[final].remove(corner) add_to_successors(this) # Generate moves 3 thru m*n-1. def advance(len=len): # If some successor has only one exit, must take it. # Else favor successors with fewer exits. candidates = [] for i in succs[self.lastij]: e = len(succs[i]) assert e > 0, "else remove_from_successors() pruning flawed" if e == 1: candidates = [(e, i)] break candidates.append((e, i)) else: candidates.sort() for e, i in candidates: if i != self.final: if remove_from_successors(i): self.lastij = i yield i add_to_successors(i) # Generate moves 3 thru m*n-1. Alternative version using a # stronger (but more expensive) heuristic to order successors. # Since the # of backtracking levels is m*n, a poor move early on # can take eons to undo. Smallest square board for which this # matters a lot is 52x52. def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len): # If some successor has only one exit, must take it. # Else favor successors with fewer exits. # Break ties via max distance from board centerpoint (favor # corners and edges whenever possible). candidates = [] for i in succs[self.lastij]: e = len(succs[i]) assert e > 0, "else remove_from_successors() pruning flawed" if e == 1: candidates = [(e, 0, i)] break i1, j1 = self.index2coords(i) d = (i1 - vmid)**2 + (j1 - hmid)**2 candidates.append((e, -d, i)) else: candidates.sort() for e, d, i in candidates: if i != self.final: if remove_from_successors(i): self.lastij = i yield i add_to_successors(i) # Generate the last move. def last(): assert self.final in succs[self.lastij] yield self.final if m*n < 4: self.squaregenerators = [first] else: self.squaregenerators = [first, second] + \ [hard and advance_hard or advance] * (m*n - 3) + \ [last] def coords2index(self, i, j): assert 0 <= i < self.m assert 0 <= j < self.n return i * self.n + j def index2coords(self, index): assert 0 <= index < self.m * self.n return divmod(index, self.n) def _init_board(self): succs = self.succs del succs[:] m, n = self.m, self.n c2i = self.coords2index offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2)] rangen = range(n) for i in range(m): for j in rangen: s = [c2i(i+io, j+jo) for io, jo in offsets if 0 <= i+io < m and 0 <= j+jo < n] succs.append(s) # Generate solutions. 
conjoin_tests = """

Generate the 3-bit binary numbers in order.  This illustrates
dumbest-possible use of conjoin, just to generate the full cross-product.

>>> for c in conjoin([lambda: iter((0, 1))] * 3):
...     print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]

For efficiency in typical backtracking apps, conjoin() yields the same list
object each time.  So if you want to save away a full account of its
generated sequence, you need to copy its results.

>>> def gencopy(iterator):
...     for x in iterator:
...         yield x[:]

>>> for n in range(10):
...     all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
...     print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True

And run an 8-queens solver.

>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
...     count += 1
...     if count <= LIMIT:
...         print("Solution", count)
...         q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+

>>> print(count, "solutions in all.")
92 solutions in all.

And run a Knight's Tour on a 10x10 board.  Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.

>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
...     count += 1
...     if count <= LIMIT:
...         print("Solution", count)
...         k.printsolution(x)
...     else:
...         break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
|  1| 58| 27| 34|  3| 40| 29| 10|  5|  8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35|  2| 57| 28| 33|  4|  7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32|  9|  6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
|  1| 58| 27| 34|  3| 40| 29| 10|  5|  8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35|  2| 57| 28| 33|  4|  7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32|  9|  6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
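# For fully independent generators -- no shared state between levels --
# conjoin's cross-product matches itertools.product.  A minimal sketch of
# that equivalence (illustrative only, not part of the test suite; the
# solvers above need conjoin precisely because their generators *do*
# share state):

def _product_equivalent():
    import itertools
    gs = [lambda: iter((0, 1))] * 3
    # conjoin yields the same list object each time, so copy via tuple().
    lazy = [tuple(c) for c in conjoin(gs)]
    eager = list(itertools.product((0, 1), repeat=3))
    assert lazy == eager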
weakref_tests = """\
Generators are weakly referenceable:

>>> import weakref
>>> def gen():
...     yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)

Generator-iterators are weakly referenceable as well:

>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']

"""
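# A generator used as a coroutine must first be advanced to its initial
# yield before send() can deliver a value (the doctests below show the
# TypeError otherwise).  A common workaround is a priming decorator; this
# is a minimal sketch, not part of the test suite:

def _primed(func):
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # run up to the first yield so send() works immediately
        return gen
    return wrapper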
coroutine_tests = """\
Sending a value into a started generator:

>>> def f():
...     print((yield 1))
...     yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2

Sending a value into a new generator produces a TypeError:

>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator


Yield by itself yields None:

>>> def f(): yield
>>> list(f())
[None]


An obscene abuse of a yield expression within a generator expression:

>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]

And a more sane, but still weird usage:

>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>


A yield expression with augmented assignment.

>>> def coroutine(seq):
...     count = 0
...     while count < 200:
...         count += yield
...         seq.append(count)

>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]


Check some syntax errors for yield expressions:

>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function

>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible

>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression

>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression


Now check some throw() conditions:

>>> def f():
...     while True:
...         try:
...             print((yield))
...         except ValueError as v:
...             print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)

>>> g.throw(ValueError) # type only
caught ValueError ()

>>> g.throw(ValueError("xyz"))  # value only
caught ValueError (xyz)

>>> g.throw(ValueError, ValueError(1))   # value+matching type
caught ValueError (1)

>>> g.throw(ValueError, TypeError(1))  # mismatched type, rewrapped
caught ValueError (1)

>>> g.throw(ValueError, ValueError(1), None)   # explicit None traceback
caught ValueError (1)

>>> g.throw(ValueError(1), "foo")       # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value

>>> g.throw(ValueError, "foo", 23)      # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object

>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str

>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int

>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type

>>> def throw(g,exc):
...     try:
...         raise exc
...     except:
...         g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()

>>> g.send(1)
1

>>> throw(g,TypeError)  # terminate the generator
Traceback (most recent call last):
...
TypeError

>>> print(g.gi_frame)
None

>>> g.send(2)
Traceback (most recent call last):
...
StopIteration

>>> g.throw(ValueError,6)       # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6

>>> f().throw(ValueError,7)     # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7

Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0

>>> def f():
...     try:
...         yield
...     except:
...         raise
>>> g = f()
>>> try:
...     1/0
... except ZeroDivisionError as v:
...     try:
...         g.throw(v)
...     except Exception as w:
...         tb = w.__traceback__
>>> levels = 0
>>> while tb:
...     levels += 1
...     tb = tb.tb_next
>>> levels
3


Now let's try closing a generator:

>>> def f():
...     try: yield
...     except GeneratorExit:
...         print("exiting")

>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close()  # should be no-op now

>>> f().close()  # close on just-opened generator should be fine

>>> def f(): yield      # an even simpler generator
>>> f().close()         # close before opening
>>> g = f()
>>> next(g)
>>> g.close()           # close normally

And finalization:

>>> def f():
...     try: yield
...     finally:
...         print("exiting")

>>> g = f()
>>> next(g)
>>> del g
exiting


GeneratorExit is not caught by except Exception:

>>> def f():
...     try: yield
...     except Exception:
...         print('except')
...     finally:
...         print('finally')

>>> g = f()
>>> next(g)
>>> del g
finally


Now let's try some ill-behaved generators:

>>> def f():
...     try: yield
...     except GeneratorExit:
...         yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()


Our ill-behaved code should be invoked during GC:

>>> import sys, io
>>> old, sys.stderr = sys.stderr, io.StringIO()
>>> g = f()
>>> next(g)
>>> del g
>>> "RuntimeError: generator ignored GeneratorExit" in sys.stderr.getvalue()
True
>>> sys.stderr = old


And errors thrown during closing should propagate:

>>> def f():
...     try: yield
...     except GeneratorExit:
...         raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!


Ensure that various yield expression constructs make their
enclosing function a generator:

>>> def f(): x += yield
>>> type(f())
<class 'generator'>

>>> def f(): x = yield
>>> type(f())
<class 'generator'>

>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>

>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<class 'generator'>

>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]

"""
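# The close() behavior exercised above is what contextlib.closing relies
# on: generators expose close(), so a with-block can run the
# GeneratorExit-driven cleanup deterministically instead of waiting for
# GC.  A minimal sketch (illustrative only, not part of the test suite):

def _closing_demo():
    from contextlib import closing

    def gen():
        try:
            yield 1
        finally:
            print("exiting")

    with closing(gen()) as g:
        next(g)
    # Leaving the block calls g.close(), which prints "exiting".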
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references.  We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.

>>> import itertools
>>> def leak():
...     class gen:
...         def __iter__(self):
...             return self
...         def __next__(self):
...             return self.item
...     g = gen()
...     head, tail = itertools.tee(g)
...     g.item = head
...     return head
>>> it = leak()

Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.

>>> item = next(it)


This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the
file was removed.

>>> def leak():
...     def gen():
...         while True:
...             yield g
...     g = gen()

>>> leak()


This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to double-check that we actually tested what we
wanted to test.

>>> import sys, io
>>> old = sys.stderr
>>> try:
...     sys.stderr = io.StringIO()
...     class Leaker:
...         def __del__(self):
...             def invoke(message):
...                 raise RuntimeError(message)
...             invoke("test")
...
...     l = Leaker()
...     del l
...     err = sys.stderr.getvalue().strip()
...     "Exception ignored in" in err
...     "RuntimeError: test" in err
...     "Traceback" in err
...     "in invoke" in err
... finally:
...     sys.stderr = old
True
True
True
True

These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.

"""

__test__ = {"tut":       tutorial_tests,
            "pep":       pep_tests,
            "email":     email_tests,
            "fun":       fun_tests,
            "syntax":    syntax_tests,
            "conjoin":   conjoin_tests,
            "weakref":   weakref_tests,
            "coroutine": coroutine_tests,
            "refleaks":  refleaks_tests,
            }
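# The __test__ mapping above is how doctest discovers these string-based
# suites.  To poke at a single suite interactively, something like the
# sketch below works (illustrative only; the doctests reference module
# globals such as conjoin and Queens, so it must run in this module's
# namespace):

def _run_one_suite(name, verbose=False):
    import doctest
    parser = doctest.DocTestParser()
    runner = doctest.DocTestRunner(verbose=verbose)
    # Parse the raw doctest string and run it against this module's globals.
    test = parser.get_doctest(__test__[name], globals(), name, None, 0)
    runner.run(test)
    return runner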
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.

def test_main(verbose=None):
    from test import support, test_generators
    support.run_unittest(__name__)
    support.run_doctest(test_generators, verbose)

# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)