Diffstat (limited to 'Doc')
-rw-r--r--  Doc/Makefile                          |    2
-rw-r--r--  Doc/api/concrete.tex                  |   22
-rw-r--r--  Doc/api/exceptions.tex                |    2
-rw-r--r--  Doc/dist/dist.tex                     |    5
-rw-r--r--  Doc/ext/extending.tex                 |   14
-rw-r--r--  Doc/howto/functional.rst              | 1472
-rw-r--r--  Doc/howto/regex.tex                   |    2
-rw-r--r--  Doc/inst/inst.tex                     |    2
-rw-r--r--  Doc/lib/libasyncore.tex               |    8
-rw-r--r--  Doc/lib/libatexit.tex                 |   16
-rw-r--r--  Doc/lib/libbase64.tex                 |    2
-rw-r--r--  Doc/lib/libbsddb.tex                  |   16
-rw-r--r--  Doc/lib/libcfgparser.tex              |   15
-rw-r--r--  Doc/lib/libcommands.tex               |    9
-rw-r--r--  Doc/lib/libcsv.tex                    |   21
-rwxr-xr-x  Doc/lib/libctypes.tex                 |   17
-rw-r--r--  Doc/lib/libdatetime.tex               |   34
-rw-r--r--  Doc/lib/libdecimal.tex                |   43
-rw-r--r--  Doc/lib/libetree.tex                  |   39
-rw-r--r--  Doc/lib/libfpectl.tex                 |    7
-rw-r--r--  Doc/lib/libfuncs.tex                  |    2
-rw-r--r--  Doc/lib/libgetopt.tex                 |    9
-rw-r--r--  Doc/lib/libhashlib.tex                |    4
-rw-r--r--  Doc/lib/libheapq.tex                  |    8
-rw-r--r--  Doc/lib/libhttplib.tex                |    5
-rw-r--r--  Doc/lib/libimp.tex                    |   25
-rw-r--r--  Doc/lib/libitertools.tex              |    4
-rw-r--r--  Doc/lib/liblogging.tex                |   13
-rw-r--r--  Doc/lib/libmsilib.tex                 |    4
-rw-r--r--  Doc/lib/libos.tex                     |   25
-rw-r--r--  Doc/lib/libpickle.tex                 |    2
-rw-r--r--  Doc/lib/libpopen2.tex                 |   12
-rw-r--r--  Doc/lib/libpyexpat.tex                |    7
-rw-r--r--  Doc/lib/libsmtplib.tex                |   12
-rw-r--r--  Doc/lib/libsocket.tex                 |   15
-rw-r--r--  Doc/lib/libsqlite3.tex                |  119
-rw-r--r--  Doc/lib/libstdtypes.tex               |   23
-rw-r--r--  Doc/lib/libstring.tex                 |    2
-rw-r--r--  Doc/lib/libsubprocess.tex             |    3
-rw-r--r--  Doc/lib/libtempfile.tex               |    7
-rw-r--r--  Doc/lib/libunittest.tex               |  102
-rw-r--r--  Doc/lib/liburlparse.tex               |   16
-rw-r--r--  Doc/lib/libuuid.tex                   |   73
-rwxr-xr-x  Doc/lib/libwsgiref.tex                |    3
-rw-r--r--  Doc/lib/libxmlrpclib.tex              |    9
-rw-r--r--  Doc/lib/sqlite3/executescript.py      |    2
-rw-r--r--  Doc/lib/tkinter.tex                   |    2
-rw-r--r--  Doc/perl/python.perl                  |    6
-rw-r--r--  Doc/ref/ref3.tex                      |   37
-rw-r--r--  Doc/tools/py2texi.el                  |   43
-rw-r--r--  Doc/tut/tut.tex                       |   10
-rw-r--r--  Doc/whatsnew/whatsnew25.tex           |   48
-rw-r--r--  Doc/whatsnew/whatsnew26.tex           |  137
53 files changed, 2191 insertions, 346 deletions
diff --git a/Doc/Makefile b/Doc/Makefile
index a435f11..bda244a 100644
--- a/Doc/Makefile
+++ b/Doc/Makefile
@@ -122,7 +122,7 @@ EMACS= emacs
# The end of this should reflect the major/minor version numbers of
# the release:
-WHATSNEW=whatsnew25
+WHATSNEW=whatsnew26
# what's what
MANDVIFILES= paper-$(PAPER)/api.dvi paper-$(PAPER)/ext.dvi \
diff --git a/Doc/api/concrete.tex b/Doc/api/concrete.tex
index 34221ad..33b04d4 100644
--- a/Doc/api/concrete.tex
+++ b/Doc/api/concrete.tex
@@ -602,15 +602,15 @@ parameter and are called with a non-string parameter.
\end{cfuncdesc}
\begin{cfuncdesc}{PyObject*}{PyString_FromString}{const char *v}
- Return a new string object with the value \var{v} on success, and
- \NULL{} on failure. The parameter \var{v} must not be \NULL{}; it
- will not be checked.
+ Return a new string object with a copy of the string \var{v} as value
+ on success, and \NULL{} on failure. The parameter \var{v} must not be
+ \NULL{}; it will not be checked.
\end{cfuncdesc}
\begin{cfuncdesc}{PyObject*}{PyString_FromStringAndSize}{const char *v,
Py_ssize_t len}
- Return a new string object with the value \var{v} and length
- \var{len} on success, and \NULL{} on failure. If \var{v} is
+ Return a new string object with a copy of the string \var{v} as value
+ and length \var{len} on success, and \NULL{} on failure. If \var{v} is
\NULL{}, the contents of the string are uninitialized.
\end{cfuncdesc}
@@ -2879,10 +2879,10 @@ rather than explicitly calling \cfunction{PyGen_New}.
Various date and time objects are supplied by the \module{datetime}
module. Before using any of these functions, the header file
\file{datetime.h} must be included in your source (note that this is
-not include by \file{Python.h}), and macro \cfunction{PyDateTime_IMPORT()}
-must be invoked. The macro arranges to put a pointer to a C structure
-in a static variable \code{PyDateTimeAPI}, which is used by the following
-macros.
+not included by \file{Python.h}), and the macro
+\cfunction{PyDateTime_IMPORT} must be invoked. The macro puts a
+pointer to a C structure into a static variable,
+\code{PyDateTimeAPI}, that is used by the following macros.
Type-check macros:
@@ -3080,9 +3080,9 @@ either the abstract object protocol (including
\cfunction{PyObject_IsTrue()}, \cfunction{PyObject_Print()}, and
\cfunction{PyObject_GetIter()})
or the abstract number protocol (including
-\cfunction{PyNumber_Add()}, \cfunction{PyNumber_Subtract()},
+\cfunction{PyNumber_And()}, \cfunction{PyNumber_Subtract()},
\cfunction{PyNumber_Or()}, \cfunction{PyNumber_Xor()},
-\cfunction{PyNumber_InPlaceAdd()}, \cfunction{PyNumber_InPlaceSubtract()},
+\cfunction{PyNumber_InPlaceAnd()}, \cfunction{PyNumber_InPlaceSubtract()},
\cfunction{PyNumber_InPlaceOr()}, and \cfunction{PyNumber_InPlaceXor()}).
\begin{ctypedesc}{PySetObject}
diff --git a/Doc/api/exceptions.tex b/Doc/api/exceptions.tex
index d7a490e..46ade49 100644
--- a/Doc/api/exceptions.tex
+++ b/Doc/api/exceptions.tex
@@ -325,7 +325,7 @@ error indicator for each thread.
default effect for \constant{SIGINT}\ttindex{SIGINT} is to raise the
\withsubitem{(built-in exception)}{\ttindex{KeyboardInterrupt}}
\exception{KeyboardInterrupt} exception. If an exception is raised
- the error indicator is set and the function returns \code{1};
+ the error indicator is set and the function returns \code{-1};
otherwise the function returns \code{0}. The error indicator may or
may not be cleared if it was previously set.
\end{cfuncdesc}
diff --git a/Doc/dist/dist.tex b/Doc/dist/dist.tex
index e8ae96f..ba90763 100644
--- a/Doc/dist/dist.tex
+++ b/Doc/dist/dist.tex
@@ -3607,6 +3607,11 @@ The class constructor takes a single argument \var{dist}, a
% todo
+\section{\module{distutils.command.bdist_msi} --- Build a Microsoft Installer binary package}
+\declaremodule[distutils.command.bdistmsi]{standard}{distutils.command.bdist_msi}
+\modulesynopsis{Build a binary distribution as a Windows MSI file}
+
+% todo
\section{\module{distutils.command.bdist_rpm} --- Build a binary distribution as a Redhat RPM and SRPM}
\declaremodule[distutils.command.bdistrpm]{standard}{distutils.command.bdist_rpm}
diff --git a/Doc/ext/extending.tex b/Doc/ext/extending.tex
index 0e2fd14..2af88b5 100644
--- a/Doc/ext/extending.tex
+++ b/Doc/ext/extending.tex
@@ -220,6 +220,8 @@ initspam(void)
PyObject *m;
m = Py_InitModule("spam", SpamMethods);
+ if (m == NULL)
+ return;
SpamError = PyErr_NewException("spam.error", NULL, NULL);
Py_INCREF(SpamError);
@@ -364,9 +366,9 @@ is inserted in the dictionary \code{sys.modules} under the key
created module based upon the table (an array of \ctype{PyMethodDef}
structures) that was passed as its second argument.
\cfunction{Py_InitModule()} returns a pointer to the module object
-that it creates (which is unused here). It aborts with a fatal error
-if the module could not be initialized satisfactorily, so the caller
-doesn't need to check for errors.
+that it creates (which is unused here). It may abort with a fatal error
+for certain errors, or return \NULL{} if the module could not be
+initialized satisfactorily.
When embedding Python, the \cfunction{initspam()} function is not
called automatically unless there's an entry in the
@@ -1275,6 +1277,8 @@ initspam(void)
PyObject *c_api_object;
m = Py_InitModule("spam", SpamMethods);
+ if (m == NULL)
+ return;
/* Initialize the C API pointer array */
PySpam_API[PySpam_System_NUM] = (void *)PySpam_System;
@@ -1361,7 +1365,9 @@ initclient(void)
{
PyObject *m;
- Py_InitModule("client", ClientMethods);
+ m = Py_InitModule("client", ClientMethods);
+ if (m == NULL)
+ return;
if (import_spam() < 0)
return;
/* additional initialization can happen here */
diff --git a/Doc/howto/functional.rst b/Doc/howto/functional.rst
new file mode 100644
index 0000000..2e5a6a9
--- /dev/null
+++ b/Doc/howto/functional.rst
@@ -0,0 +1,1472 @@
+Functional Programming HOWTO
+================================
+
+**Version 0.30**
+
+(This is a first draft. Please send comments/error
+reports/suggestions to amk@amk.ca. This URL is probably not going to
+be the final location of the document, so be careful about linking to
+it -- you may want to add a disclaimer.)
+
+In this document, we'll take a tour of Python's features suitable for
+implementing programs in a functional style. After an introduction to
+the concepts of functional programming, we'll look at language
+features such as iterators and generators and relevant library modules
+such as ``itertools`` and ``functools``.
+
+
+.. contents::
+
+Introduction
+----------------------
+
+This section explains the basic concept of functional programming; if
+you're just interested in learning about Python language features,
+skip to the next section.
+
+Programming languages support decomposing problems in several different
+ways:
+
+* Most programming languages are **procedural**:
+ programs are lists of instructions that tell the computer what to
+ do with the program's input.
+ C, Pascal, and even Unix shells are procedural languages.
+
+* In **declarative** languages, you write a specification that describes
+ the problem to be solved, and the language implementation figures out
+ how to perform the computation efficiently. SQL is the declarative
+ language you're most likely to be familiar with; a SQL query describes
+ the data set you want to retrieve, and the SQL engine decides whether to
+ scan tables or use indexes, which subclauses should be performed first,
+ etc.
+
+* **Object-oriented** programs manipulate collections of objects.
+ Objects have internal state and support methods that query or modify
+ this internal state in some way. Smalltalk and Java are
+ object-oriented languages. C++ and Python are languages that
+ support object-oriented programming, but don't force the use
+ of object-oriented features.
+
+* **Functional** programming decomposes a problem into a set of functions.
+ Ideally, functions only take inputs and produce outputs, and don't have any
+ internal state that affects the output produced for a given input.
+ Well-known functional languages include the ML family (Standard ML,
+ OCaml, and other variants) and Haskell.
+
+The designers of some computer languages choose to emphasize one
+particular approach to programming. This often makes it difficult to
+write programs that use a different approach. Other languages are
+multi-paradigm languages that support several different approaches. Lisp,
+C++, and Python are multi-paradigm; you can write programs or
+libraries that are largely procedural, object-oriented, or functional
+in all of these languages. In a large program, different sections
+might be written using different approaches; the GUI might be object-oriented
+while the processing logic is procedural or functional, for example.
+
+In a functional program, input flows through a set of functions. Each
+function operates on its input and produces some output. Functional
+style frowns upon functions with side effects that modify internal
+state or make other changes that aren't visible in the function's
+return value. Functions that have no side effects at all are
+called **purely functional**.
+Avoiding side effects means not using data structures
+that get updated as a program runs; every function's output
+must only depend on its input.
+
+Some languages are very strict about purity and don't even have
+assignment statements such as ``a=3`` or ``c = a + b``, but it's
+difficult to avoid all side effects. Printing to the screen or
+writing to a disk file are side effects, for example. In Python, a
+``print`` statement or a ``time.sleep(1)`` call both return no
+useful value; they're only called for their side effects of sending
+some text to the screen or pausing execution for a second.
+
+Python programs written in functional style usually won't go to the
+extreme of avoiding all I/O or all assignments; instead, they'll
+provide a functional-appearing interface but will use non-functional
+features internally. For example, the implementation of a function
+will still use assignments to local variables, but won't modify global
+variables or have other side effects.
+
+Functional programming can be considered the opposite of
+object-oriented programming. Objects are little capsules containing
+some internal state along with a collection of method calls that let
+you modify this state, and programs consist of making the right set of
+state changes. Functional programming wants to avoid state changes as
+much as possible and works with data flowing between functions. In
+Python you might combine the two approaches by writing functions that
+take and return instances representing objects in your application
+(e-mail messages, transactions, etc.).
+
+Functional design may seem like an odd constraint to work under. Why
+should you avoid objects and side effects? There are theoretical and
+practical advantages to the functional style:
+
+* Formal provability.
+* Modularity.
+* Composability.
+* Ease of debugging and testing.
+
+Formal provability
+''''''''''''''''''''''
+
+A theoretical benefit is that it's easier to construct a mathematical proof
+that a functional program is correct.
+
+For a long time researchers have been interested in finding ways to
+mathematically prove programs correct. This is different from testing
+a program on numerous inputs and concluding that its output is usually
+correct, or reading a program's source code and concluding that the
+code looks right; the goal is instead a rigorous proof that a program
+produces the right result for all possible inputs.
+
+The technique used to prove programs correct is to write down
+**invariants**, properties of the input data and of the program's
+variables that are always true. For each line of code, you then show
+that if invariants X and Y are true **before** the line is executed,
+the slightly different invariants X' and Y' are true **after**
+the line is executed. This continues until you reach the end of the
+program, at which point the invariants should match the desired
+conditions on the program's output.
+
+Functional programming's avoidance of assignments arose because
+assignments are difficult to handle with this technique;
+assignments can break invariants that were true before the assignment
+without producing any new invariants that can be propagated onward.
+
+Unfortunately, proving programs correct is largely impractical and not
+relevant to Python software. Even trivial programs require proofs that
+are several pages long; the proof of correctness for a moderately
+complicated program would be enormous, and few or none of the programs
+you use daily (the Python interpreter, your XML parser, your web
+browser) could be proven correct. Even if you wrote down or generated
+a proof, there would then be the question of verifying the proof;
+maybe there's an error in it, and you wrongly believe you've proved
+the program correct.
+
+Modularity
+''''''''''''''''''''''
+
+A more practical benefit of functional programming is that it forces
+you to break apart your problem into small pieces. Programs are more
+modular as a result. It's easier to specify and write a small
+function that does one thing than a large function that performs a
+complicated transformation. Small functions are also easier to read
+and to check for errors.
+
+
+Ease of debugging and testing
+''''''''''''''''''''''''''''''''''
+
+Testing and debugging a functional-style program is easier.
+
+Debugging is simplified because functions are generally small and
+clearly specified. When a program doesn't work, each function is an
+interface point where you can check that the data are correct. You
+can look at the intermediate inputs and outputs to quickly isolate the
+function that's responsible for a bug.
+
+Testing is easier because each function is a potential subject for a
+unit test. Functions don't depend on system state that needs to be
+replicated before running a test; instead you only have to synthesize
+the right input and then check that the output matches expectations.
+
+
+
+Composability
+''''''''''''''''''''''
+
+As you work on a functional-style program, you'll write a number of
+functions with varying inputs and outputs. Some of these functions
+will be unavoidably specialized to a particular application, but
+others will be useful in a wide variety of programs. For example, a
+function that takes a directory path and returns all the XML files in
+the directory, or a function that takes a filename and returns its
+contents, can be applied to many different situations.
+
+Over time you'll form a personal library of utilities. Often you'll
+assemble new programs by arranging existing functions in a new
+configuration and writing a few functions specialized for the current
+task.
+
+
+
+Iterators
+-----------------------
+
+I'll start by looking at a Python language feature that's an important
+foundation for writing functional-style programs: iterators.
+
+An iterator is an object representing a stream of data; this object
+returns the data one element at a time. A Python iterator must
+support a method called ``next()`` that takes no arguments and always
+returns the next element of the stream. If there are no more elements
+in the stream, ``next()`` must raise the ``StopIteration`` exception.
+Iterators don't have to be finite, though; it's perfectly reasonable
+to write an iterator that produces an infinite stream of data.
+
+The built-in ``iter()`` function takes an arbitrary object and tries
+to return an iterator that will return the object's contents or
+elements, raising ``TypeError`` if the object doesn't support
+iteration. Several of Python's built-in data types support iteration,
+the most common being lists and dictionaries. An object is called
+an **iterable** object if you can get an iterator for it.
+
+You can experiment with the iteration interface manually::
+
+ >>> L = [1,2,3]
+ >>> it = iter(L)
+ >>> print it
+ <iterator object at 0x8116870>
+ >>> it.next()
+ 1
+ >>> it.next()
+ 2
+ >>> it.next()
+ 3
+ >>> it.next()
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ StopIteration
+ >>>
+
+Python expects iterable objects in several different contexts, the
+most important being the ``for`` statement. In the statement ``for X in Y``,
+Y must be an iterator or some object for which ``iter()`` can create
+an iterator. These two statements are equivalent::
+
+ for i in iter(obj):
+     print i
+
+ for i in obj:
+     print i
+
+Iterators can be materialized as lists or tuples by using the
+``list()`` or ``tuple()`` constructor functions::
+
+ >>> L = [1,2,3]
+ >>> iterator = iter(L)
+ >>> t = tuple(iterator)
+ >>> t
+ (1, 2, 3)
+
+Sequence unpacking also supports iterators: if you know an iterator
+will return N elements, you can unpack them into an N-tuple::
+
+ >>> L = [1,2,3]
+ >>> iterator = iter(L)
+ >>> a,b,c = iterator
+ >>> a,b,c
+ (1, 2, 3)
+
+Built-in functions such as ``max()`` and ``min()`` can take a single
+iterator argument and will return the largest or smallest element.
+The ``"in"`` and ``"not in"`` operators also support iterators: ``X in
+iterator`` is true if X is found in the stream returned by the
+iterator. You'll run into obvious problems if the iterator is
+infinite; ``max()``, ``min()``, and ``"not in"`` will never return, and
+if the element X never appears in the stream, the ``"in"`` operator
+won't return either.
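+
+For example (using a throwaway list just for illustration)::
+
+ >>> L = [4, 9, 2, 7]
+ >>> max(iter(L))
+ 9
+ >>> min(iter(L))
+ 2
+ >>> 7 in iter(L)
+ True
+ >>> 100 in iter(L)
+ False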
+
+Note that you can only go forward in an iterator; there's no way to
+get the previous element, reset the iterator, or make a copy of it.
+Iterator objects can optionally provide these additional capabilities,
+but the iterator protocol only specifies the ``next()`` method.
+Functions may therefore consume all of the iterator's output, and if
+you need to do something different with the same stream, you'll have
+to create a new iterator.
+
+
+
+Data Types That Support Iterators
+'''''''''''''''''''''''''''''''''''
+
+We've already seen how lists and tuples support iterators. In fact,
+any Python sequence type, such as strings, will automatically support
+creation of an iterator.
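+
+Strings are one example; calling ``iter()`` on a string returns an
+iterator over its characters::
+
+ >>> it = iter('abc')
+ >>> it.next()
+ 'a'
+ >>> it.next()
+ 'b'
+ >>> it.next()
+ 'c'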
+
+Calling ``iter()`` on a dictionary returns an iterator that will loop
+over the dictionary's keys::
+
+ >>> m = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
+ ... 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
+ >>> for key in m:
+ ...     print key, m[key]
+ Mar 3
+ Feb 2
+ Aug 8
+ Sep 9
+ May 5
+ Jun 6
+ Jul 7
+ Jan 1
+ Apr 4
+ Nov 11
+ Dec 12
+ Oct 10
+
+Note that the order is essentially random, because it's based on the
+hash ordering of the objects in the dictionary.
+
+Applying ``iter()`` to a dictionary always loops over the keys, but
+dictionaries have methods that return other iterators. If you want to
+iterate over keys, values, or key/value pairs, you can explicitly call
+the ``iterkeys()``, ``itervalues()``, or ``iteritems()`` methods to
+get an appropriate iterator.
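+
+Here's a small sketch using a shorter dictionary (remember that the
+ordering of the results is arbitrary and may differ on your system)::
+
+ >>> m = {'Jan': 1, 'Feb': 2, 'Mar': 3}
+ >>> list(m.iterkeys())
+ ['Jan', 'Mar', 'Feb']
+ >>> list(m.itervalues())
+ [1, 3, 2]
+ >>> list(m.iteritems())
+ [('Jan', 1), ('Mar', 3), ('Feb', 2)]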
+
+The ``dict()`` constructor can accept an iterator that returns a
+finite stream of ``(key, value)`` tuples::
+
+ >>> L = [('Italy', 'Rome'), ('France', 'Paris'), ('US', 'Washington DC')]
+ >>> dict(iter(L))
+ {'Italy': 'Rome', 'US': 'Washington DC', 'France': 'Paris'}
+
+Files also support iteration by calling the ``readline()``
+method until there are no more lines in the file. This means you can
+read each line of a file like this::
+
+ for line in file:
+     # do something for each line
+     ...
+
+Sets can take their contents from an iterable and let you iterate over
+the set's elements::
+
+ S = set((2, 3, 5, 7, 11, 13))
+ for i in S:
+     print i
+
+
+
+Generator expressions and list comprehensions
+----------------------------------------------------
+
+Two common operations on an iterator's output are 1) performing some
+operation for every element, and 2) selecting a subset of elements that
+meet some condition. For example, given a list of strings, you might
+want to strip off trailing whitespace from each line or extract all
+the strings containing a given substring.
+
+List comprehensions and generator expressions (short form: "listcomps"
+and "genexps") are a concise notation for such operations, borrowed
+from the functional programming language Haskell
+(http://www.haskell.org). You can strip all the whitespace from a
+stream of strings with the following code::
+
+ line_list = [' line 1\n', 'line 2 \n', ...]
+
+ # Generator expression -- returns iterator
+ stripped_iter = (line.strip() for line in line_list)
+
+ # List comprehension -- returns list
+ stripped_list = [line.strip() for line in line_list]
+
+You can select only certain elements by adding an ``"if"`` condition::
+
+ stripped_list = [line.strip() for line in line_list
+ if line != ""]
+
+With a list comprehension, you get back a Python list;
+``stripped_list`` is a list containing the resulting lines, not an
+iterator. Generator expressions return an iterator that computes the
+values as necessary, not needing to materialize all the values at
+once. This means that list comprehensions aren't useful if you're
+working with iterators that return an infinite stream or a very large
+amount of data. Generator expressions are preferable in these
+situations.
+
+Generator expressions are surrounded by parentheses ("()") and list
+comprehensions are surrounded by square brackets ("[]"). Generator
+expressions have the form::
+
+ ( expression for expr in sequence1
+              if condition1
+              for expr2 in sequence2
+              if condition2
+              for expr3 in sequence3 ...
+              if condition3
+              for exprN in sequenceN
+              if conditionN )
+
+Again, for a list comprehension only the outside brackets are
+different (square brackets instead of parentheses).
+
+The elements of the generated output will be the successive values of
+``expression``. The ``if`` clauses are all optional; if present,
+``expression`` is only evaluated and added to the result when
+``condition`` is true.
+
+Generator expressions always have to be written inside parentheses,
+but the parentheses signalling a function call also count. If you
+want to create an iterator that will be immediately passed to a
+function you can write::
+
+ obj_total = sum(obj.count for obj in list_all_objects())
+
+The ``for...in`` clauses contain the sequences to be iterated over.
+The sequences do not have to be the same length, because they are
+iterated over from left to right, **not** in parallel. For each
+element in ``sequence1``, ``sequence2`` is looped over from the
+beginning. ``sequence3`` is then looped over for each
+resulting pair of elements from ``sequence1`` and ``sequence2``.
+
+To put it another way, a list comprehension or generator expression is
+equivalent to the following Python code::
+
+ for expr1 in sequence1:
+     if not (condition1):
+         continue   # Skip this element
+     for expr2 in sequence2:
+         if not (condition2):
+             continue   # Skip this element
+         ...
+         for exprN in sequenceN:
+             if not (conditionN):
+                 continue   # Skip this element
+
+             # Output the value of
+             # the expression.
+
+This means that when there are multiple ``for...in`` clauses but no
+``if`` clauses, the length of the resulting output will be equal to
+the product of the lengths of all the sequences. If you have two
+lists of length 3, the output list is 9 elements long::
+
+ >>> seq1 = 'abc'
+ >>> seq2 = (1,2,3)
+ >>> [ (x,y) for x in seq1 for y in seq2]
+ [('a', 1), ('a', 2), ('a', 3),
+  ('b', 1), ('b', 2), ('b', 3),
+  ('c', 1), ('c', 2), ('c', 3)]
+
+To avoid introducing an ambiguity into Python's grammar, if
+``expression`` is creating a tuple, it must be surrounded with
+parentheses. The first list comprehension below is a syntax error,
+while the second one is correct::
+
+ # Syntax error
+ [ x,y for x in seq1 for y in seq2]
+ # Correct
+ [ (x,y) for x in seq1 for y in seq2]
+
+
+Generators
+-----------------------
+
+Generators are a special class of functions that simplify the task of
+writing iterators. Regular functions compute a value and return it,
+but generators return an iterator that returns a stream of values.
+
+You're doubtless familiar with how regular function calls work in
+Python or C. When you call a function, it gets a private namespace
+where its local variables are created. When the function reaches a
+``return`` statement, the local variables are destroyed and the
+value is returned to the caller. A later call to the same function
+creates a new private namespace and a fresh set of local
+variables. But, what if the local variables weren't thrown away on
+exiting a function? What if you could later resume the function where
+it left off? This is what generators provide; they can be thought of
+as resumable functions.
+
+Here's the simplest example of a generator function::
+
+ def generate_ints(N):
+     for i in range(N):
+         yield i
+
+Any function containing a ``yield`` keyword is a generator function;
+this is detected by Python's bytecode compiler which compiles the
+function specially as a result.
+
+When you call a generator function, it doesn't return a single value;
+instead it returns a generator object that supports the iterator
+protocol. On executing the ``yield`` expression, the generator
+outputs the value of ``i``, similar to a ``return``
+statement. The big difference between ``yield`` and a
+``return`` statement is that on reaching a ``yield`` the
+generator's state of execution is suspended and local variables are
+preserved. On the next call to the generator's ``.next()`` method,
+the function will resume executing.
+
+Here's a sample usage of the ``generate_ints()`` generator::
+
+ >>> gen = generate_ints(3)
+ >>> gen
+ <generator object at 0x8117f90>
+ >>> gen.next()
+ 0
+ >>> gen.next()
+ 1
+ >>> gen.next()
+ 2
+ >>> gen.next()
+ Traceback (most recent call last):
+ File "stdin", line 1, in ?
+ File "stdin", line 2, in generate_ints
+ StopIteration
+
+You could equally write ``for i in generate_ints(5)``, or
+``a,b,c = generate_ints(3)``.
+
+Inside a generator function, the ``return`` statement can only be used
+without a value, and signals the end of the procession of values;
+after executing a ``return`` the generator cannot return any further
+values. ``return`` with a value, such as ``return 5``, is a syntax
+error inside a generator function. The end of the generator's results
+can also be indicated by raising ``StopIteration`` manually, or by
+just letting the flow of execution fall off the bottom of the
+function.
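+
+For instance, here's a small sketch (the function name is just made up
+for illustration) that stops producing values as soon as it sees an
+element over a limit::
+
+ def up_to_limit(seq, limit):
+     for elem in seq:
+         if elem > limit:
+             # A bare return ends the stream of values; the next
+             # call to .next() raises StopIteration.
+             return
+         yield elem
+
+ # list(up_to_limit([1, 5, 20, 3], 10)) produces [1, 5]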
+
+You could achieve the effect of generators manually by writing your
+own class and storing all the local variables of the generator as
+instance variables. For example, returning a list of integers could
+be done by setting ``self.count`` to 0, and having the
+``next()`` method increment ``self.count`` and return it.
+However, for a moderately complicated generator, writing a
+corresponding class can be much messier.
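+
+Here's a rough sketch of what such a class might look like (the class
+name and details are illustrative, not taken from the library)::
+
+ class GenerateInts:
+     def __init__(self, N):
+         self.count = 0
+         self.N = N
+     def __iter__(self):
+         return self
+     def next(self):
+         if self.count >= self.N:
+             raise StopIteration
+         value = self.count
+         self.count += 1
+         return value
+
+ # list(GenerateInts(3)) produces [0, 1, 2], like generate_ints(3).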
+
+The test suite included with Python's library, ``test_generators.py``,
+contains a number of more interesting examples. Here's one generator
+that implements an in-order traversal of a tree using generators
+recursively.
+
+::
+
+ # A recursive generator that generates Tree leaves in in-order.
+ def inorder(t):
+     if t:
+         for x in inorder(t.left):
+             yield x
+
+         yield t.label
+
+         for x in inorder(t.right):
+             yield x
+
+Two other examples in ``test_generators.py`` produce
+solutions for the N-Queens problem (placing N queens on an NxN
+chess board so that no queen threatens another) and the Knight's Tour
+(finding a route that takes a knight to every square of an NxN chessboard
+without visiting any square twice).
+
+
+
+Passing values into a generator
+''''''''''''''''''''''''''''''''''''''''''''''
+
+In Python 2.4 and earlier, generators only produced output. Once a
+generator's code was invoked to create an iterator, there was no way to
+pass any new information into the function when its execution was
+resumed. You could hack together this ability by making the
+generator look at a global variable or by passing in some mutable object
+that callers then modify, but these approaches are messy.
+
+In Python 2.5 there's a simple way to pass values into a generator.
+``yield`` became an expression, returning a value that can be assigned
+to a variable or otherwise operated on::
+
+ val = (yield i)
+
+I recommend that you **always** put parentheses around a ``yield``
+expression when you're doing something with the returned value, as in
+the above example. The parentheses aren't always necessary, but it's
+easier to always add them instead of having to remember when they're
+needed.
+
+(PEP 342 explains the exact rules, which are that a
+``yield``-expression must always be parenthesized except when it
+occurs at the top-level expression on the right-hand side of an
+assignment. This means you can write ``val = yield i`` but have to
+use parentheses when there's an operation, as in ``val = (yield i)
++ 12``.)
+
+Values are sent into a generator by calling its
+``send(value)`` method. This method resumes the
+generator's code and the ``yield`` expression returns the specified
+value. If the regular ``next()`` method is called, the
+``yield`` returns ``None``.
+
+Here's a simple counter that increments by 1 and allows changing the
+value of the internal counter.
+
+::
+
+ def counter (maximum):
+     i = 0
+     while i < maximum:
+         val = (yield i)
+         # If value provided, change counter
+         if val is not None:
+             i = val
+         else:
+             i += 1
+
+And here's an example of changing the counter::
+
+ >>> it = counter(10)
+ >>> print it.next()
+ 0
+ >>> print it.next()
+ 1
+ >>> print it.send(8)
+ 8
+ >>> print it.next()
+ 9
+ >>> print it.next()
+ Traceback (most recent call last):
+ File "t.py", line 15, in ?
+ print it.next()
+ StopIteration
+
+Because ``yield`` will often be returning ``None``, you
+should always check for this case. Don't just use its value in
+expressions unless you're sure that the ``send()`` method
+will be the only method used to resume your generator function.
+
+In addition to ``send()``, there are two other new methods on
+generators (both are demonstrated in the sketch after this list):
+
+* ``throw(type, value=None, traceback=None)`` is used to raise an exception inside the
+ generator; the exception is raised by the ``yield`` expression
+ where the generator's execution is paused.
+
+* ``close()`` raises a ``GeneratorExit``
+ exception inside the generator to terminate the iteration.
+ On receiving this
+ exception, the generator's code must either raise
+ ``GeneratorExit`` or ``StopIteration``; catching the
+ exception and doing anything else is illegal and will trigger
+ a ``RuntimeError``. ``close()`` will also be called by
+ Python's garbage collector when the generator is garbage-collected.
+
+ If you need to run cleanup code when a ``GeneratorExit`` occurs,
+ I suggest using a ``try: ... finally:`` suite instead of
+ catching ``GeneratorExit``.
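+
+Here's a rough sketch of both methods applied to the ``counter()``
+generator defined above (the traceback output is abbreviated)::
+
+ >>> it = counter(10)
+ >>> it.next()
+ 0
+ >>> it.throw(ValueError, 'something went wrong')
+ Traceback (most recent call last):
+   ...
+ ValueError: something went wrong
+ >>> it2 = counter(10)
+ >>> it2.next()
+ 0
+ >>> it2.close()
+ >>> it2.next()
+ Traceback (most recent call last):
+   ...
+ StopIteration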
+
+The cumulative effect of these changes is to turn generators from
+one-way producers of information into both producers and consumers.
+
+Generators also become **coroutines**, a more generalized form of
+subroutines. Subroutines are entered at one point and exited at
+another point (the top of the function, and a ``return``
+statement), but coroutines can be entered, exited, and resumed at
+many different points (the ``yield`` statements).
+
+
+Built-in functions
+----------------------------------------------
+
+Let's look in more detail at built-in functions often used with iterators.
+
+Two of Python's built-in functions, ``map()`` and ``filter()``, are
+somewhat obsolete; they duplicate the features of list comprehensions
+but return actual lists instead of iterators.
+
+``map(f, iterA, iterB, ...)`` returns a list containing ``f(iterA[0],
+iterB[0]), f(iterA[1], iterB[1]), f(iterA[2], iterB[2]), ...``.
+
+::
+
+ def upper(s):
+     return s.upper()
+
+ map(upper, ['sentence', 'fragment']) =>
+ ['SENTENCE', 'FRAGMENT']
+
+ [upper(s) for s in ['sentence', 'fragment']] =>
+ ['SENTENCE', 'FRAGMENT']
+
+As shown above, you can achieve the same effect with a list
+comprehension. The ``itertools.imap()`` function does the same thing
+but can handle infinite iterators; it'll be discussed later, in the section on
+the ``itertools`` module.
+
+``filter(predicate, iter)`` returns a list
+that contains all the sequence elements that meet a certain condition,
+and is similarly duplicated by list comprehensions.
+A **predicate** is a function that returns the truth value of
+some condition; for use with ``filter()``, the predicate must take a
+single value.
+
+::
+
+ def is_even(x):
+     return (x % 2) == 0
+
+ filter(is_even, range(10)) =>
+ [0, 2, 4, 6, 8]
+
+This can also be written as a list comprehension::
+
+ >>> [x for x in range(10) if is_even(x)]
+ [0, 2, 4, 6, 8]
+
+``filter()`` also has a counterpart in the ``itertools`` module,
+``itertools.ifilter()``, that returns an iterator and
+can therefore handle infinite sequences just as ``itertools.imap()`` can.
+
+``reduce(func, iter, [initial_value])`` doesn't have a counterpart in
+the ``itertools`` module because it cumulatively performs an operation
+on all the iterable's elements and therefore can't be applied to
+infinite iterables. ``func`` must be a function that takes two elements
+and returns a single value. ``reduce()`` takes the first two elements
+A and B returned by the iterator and calculates ``func(A, B)``. It
+then requests the third element, C, calculates ``func(func(A, B),
+C)``, combines this result with the fourth element returned, and
+continues until the iterable is exhausted. If the iterable returns no
+values at all, a ``TypeError`` exception is raised. If the initial
+value is supplied, it's used as a starting point and
+``func(initial_value, A)`` is the first calculation.
+
+::
+
+ import operator
+ reduce(operator.concat, ['A', 'BB', 'C']) =>
+ 'ABBC'
+ reduce(operator.concat, []) =>
+ TypeError: reduce() of empty sequence with no initial value
+ reduce(operator.mul, [1,2,3], 1) =>
+ 6
+ reduce(operator.mul, [], 1) =>
+ 1
+
+If you use ``operator.add`` with ``reduce()``, you'll add up all the
+elements of the iterable. This case is so common that there's a special
+built-in called ``sum()`` to compute it::
+
+ reduce(operator.add, [1,2,3,4], 0) =>
+ 10
+ sum([1,2,3,4]) =>
+ 10
+ sum([]) =>
+ 0
+
+For many uses of ``reduce()``, though, it can be clearer to just write
+the obvious ``for`` loop::
+
+ # Instead of:
+ product = reduce(operator.mul, [1,2,3], 1)
+
+ # You can write:
+ product = 1
+ for i in [1,2,3]:
+     product *= i
+
+
+``enumerate(iter)`` counts off the elements in the iterable, returning
+2-tuples containing the count and each element.
+
+::
+
+ enumerate(['subject', 'verb', 'object']) =>
+ (0, 'subject'), (1, 'verb'), (2, 'object')
+
+``enumerate()`` is often used when looping through a list
+and recording the indexes at which certain conditions are met::
+
+ f = open('data.txt', 'r')
+ for i, line in enumerate(f):
+     if line.strip() == '':
+         print 'Blank line at line #%i' % i
+
+``sorted(iterable, [cmp=None], [key=None], [reverse=False])``
+collects all the elements of the iterable into a list, sorts
+the list, and returns the sorted result. The ``cmp``, ``key``,
+and ``reverse`` arguments are passed through to the
+constructed list's ``.sort()`` method.
+
+::
+
+ import random
+ # Generate 8 random numbers between [0, 10000)
+ rand_list = random.sample(range(10000), 8)
+ rand_list =>
+ [769, 7953, 9828, 6431, 8442, 9878, 6213, 2207]
+ sorted(rand_list) =>
+ [769, 2207, 6213, 6431, 7953, 8442, 9828, 9878]
+ sorted(rand_list, reverse=True) =>
+ [9878, 9828, 8442, 7953, 6431, 6213, 2207, 769]
+
+(For a more detailed discussion of sorting, see the Sorting mini-HOWTO
+in the Python wiki at http://wiki.python.org/moin/HowTo/Sorting.)
+
+The ``any(iter)`` and ``all(iter)`` built-ins look at
+the truth values of an iterable's contents. ``any()`` returns
+True if any element in the iterable is a true value, and ``all()``
+returns True if all of the elements are true values::
+
+ any([0,1,0]) =>
+ True
+ any([0,0,0]) =>
+ False
+ any([1,1,1]) =>
+ True
+ all([0,1,0]) =>
+ False
+ all([0,0,0]) =>
+ False
+ all([1,1,1]) =>
+ True
+
+
+Small functions and the lambda statement
+----------------------------------------------
+
+When writing functional-style programs, you'll often need little
+functions that act as predicates or that combine elements in some way.
+
+If there's a Python built-in or a module function that's suitable, you
+don't need to define a new function at all::
+
+ stripped_lines = [line.strip() for line in lines]
+ existing_files = filter(os.path.exists, file_list)
+
+If the function you need doesn't exist, you need to write it. One way
+to write small functions is to use the ``lambda`` statement. ``lambda``
+takes a number of parameters and an expression combining these parameters,
+and creates a small function that returns the value of the expression::
+
+ lowercase = lambda x: x.lower()
+
+ print_assign = lambda name, value: name + '=' + str(value)
+
+ adder = lambda x, y: x+y
+
+An alternative is to just use the ``def`` statement and define a
+function in the usual way::
+
+ def lowercase(x):
+     return x.lower()
+
+ def print_assign(name, value):
+     return name + '=' + str(value)
+
+ def adder(x,y):
+     return x + y
+
+Which alternative is preferable? That's a style question; my usual
+course is to avoid using ``lambda``.
+
+One reason for my preference is that ``lambda`` is quite limited in
+the functions it can define. The result has to be computable as a
+single expression, which means you can't have multiway
+``if... elif... else`` comparisons or ``try... except`` statements.
+If you try to do too much in a ``lambda`` statement, you'll end up
+with an overly complicated expression that's hard to read. Quick,
+what's the following code doing?
+
+::
+
+ total = reduce(lambda a, b: (0, a[1] + b[1]), items)[1]
+
+You can figure it out, but it takes time to disentangle the expression
+to figure out what's going on. Using a short nested
+``def`` statement makes things a little bit better::
+
+ def combine (a, b):
+     return 0, a[1] + b[1]
+
+ total = reduce(combine, items)[1]
+
+But it would be best of all if I had simply used a ``for`` loop::
+
+ total = 0
+ for a, b in items:
+     total += b
+
+Or the ``sum()`` built-in and a generator expression::
+
+ total = sum(b for a,b in items)
+
+Many uses of ``reduce()`` are clearer when written as ``for`` loops.
+
+Fredrik Lundh once suggested the following set of rules for refactoring
+uses of ``lambda``:
+
+1) Write a lambda function.
+2) Write a comment explaining what the heck that lambda does.
+3) Study the comment for a while, and think of a name that captures
+ the essence of the comment.
+4) Convert the lambda to a def statement, using that name.
+5) Remove the comment.
+
+I really like these rules, but you're free to disagree about whether
+this lambda-free style is better.
+
+
+The itertools module
+-----------------------
+
+The ``itertools`` module contains a number of commonly-used iterators
+as well as functions for combining several iterators. This section
+will introduce the module's contents by showing small examples.
+
+The module's functions fall into a few broad classes:
+
+* Functions that create a new iterator based on an existing iterator.
+* Functions for treating an iterator's elements as function arguments.
+* Functions for selecting portions of an iterator's output.
+* A function for grouping an iterator's output.
+
+Creating new iterators
+''''''''''''''''''''''
+
+``itertools.count(n)`` returns an infinite stream of
+integers, increasing by 1 each time. You can optionally supply the
+starting number, which defaults to 0::
+
+ itertools.count() =>
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ...
+ itertools.count(10) =>
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, ...
+
+``itertools.cycle(iter)`` saves a copy of the contents of a provided
+iterable and returns a new iterator that returns its elements from
+first to last. The new iterator will repeat these elements infinitely.
+
+::
+
+ itertools.cycle([1,2,3,4,5]) =>
+ 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, ...
+
+``itertools.repeat(elem, [n])`` returns the provided element ``n``
+times, or returns the element endlessly if ``n`` is not provided.
+
+::
+
+ itertools.repeat('abc') =>
+ abc, abc, abc, abc, abc, abc, abc, abc, abc, abc, ...
+ itertools.repeat('abc', 5) =>
+ abc, abc, abc, abc, abc
+
+``itertools.chain(iterA, iterB, ...)`` takes an arbitrary number of
+iterables as input, and returns all the elements of the first
+iterator, then all the elements of the second, and so on, until all of
+the iterables have been exhausted.
+
+::
+
+ itertools.chain(['a', 'b', 'c'], (1, 2, 3)) =>
+ a, b, c, 1, 2, 3
+
+``itertools.izip(iterA, iterB, ...)`` takes one element from each iterable
+and returns them in a tuple::
+
+ itertools.izip(['a', 'b', 'c'], (1, 2, 3)) =>
+ ('a', 1), ('b', 2), ('c', 3)
+
+It's similar to the built-in ``zip()`` function, but doesn't
+construct an in-memory list and exhaust all the input iterators before
+returning; instead tuples are constructed and returned only if they're
+requested. (The technical term for this behaviour is
+`lazy evaluation <http://en.wikipedia.org/wiki/Lazy_evaluation>`__.)
+
+This iterator is intended to be used with iterables that are all of
+the same length. If the iterables are of different lengths, the
+resulting stream will be the same length as the shortest iterable.
+
+::
+
+ itertools.izip(['a', 'b'], (1, 2, 3)) =>
+ ('a', 1), ('b', 2)
+
+You should avoid doing this, though, because an element may be taken
+from the longer iterators and discarded. This means you can't go on
+to use the iterators further because you risk skipping a discarded
+element.
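+
+Here's a small sketch of the hazard; exactly which element gets
+discarded depends on the order in which ``izip()`` consults its
+arguments, so treat the details as illustrative::
+
+ >>> import itertools
+ >>> long_iter = iter([1, 2, 3])
+ >>> short_iter = iter(['a'])
+ >>> list(itertools.izip(long_iter, short_iter))
+ [(1, 'a')]
+ >>> long_iter.next()    # 2 was pulled by izip() and discarded
+ 3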
+
+``itertools.islice(iter, [start], stop, [step])`` returns a stream
+that's a slice of the iterator. With a single ``stop`` argument,
+it will return the first ``stop``
+elements. If you supply a starting index, you'll get ``stop-start``
+elements, and if you supply a value for ``step``, elements will be
+skipped accordingly. Unlike Python's string and list slicing, you
+can't use negative values for ``start``, ``stop``, or ``step``.
+
+::
+
+ itertools.islice(range(10), 8) =>
+ 0, 1, 2, 3, 4, 5, 6, 7
+ itertools.islice(range(10), 2, 8) =>
+ 2, 3, 4, 5, 6, 7
+ itertools.islice(range(10), 2, 8, 2) =>
+ 2, 4, 6
+
+``itertools.tee(iter, [n])`` replicates an iterator; it returns ``n``
+independent iterators that will all return the contents of the source
+iterator. If you don't supply a value for ``n``, the default is 2.
+Replicating iterators requires saving some of the contents of the source
+iterator, so this can consume significant memory if the iterator is large
+and one of the new iterators is consumed more than the others.
+
+::
+
+ itertools.tee( itertools.count() ) =>
+ iterA, iterB
+
+ where iterA ->
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ...
+
+ and iterB ->
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ...
+
+
+Calling functions on elements
+'''''''''''''''''''''''''''''
+
+Two functions are used for calling other functions on the contents of an
+iterable.
+
+``itertools.imap(f, iterA, iterB, ...)`` returns
+a stream containing ``f(iterA[0], iterB[0]), f(iterA[1], iterB[1]),
+f(iterA[2], iterB[2]), ...``::
+
+ itertools.imap(operator.add, [5, 6, 5], [1, 2, 3]) =>
+ 6, 8, 8
+
+The ``operator`` module contains a set of functions
+corresponding to Python's operators. Some examples are
+``operator.add(a, b)`` (adds two values),
+``operator.ne(a, b)`` (same as ``a!=b``),
+and
+``operator.attrgetter('id')`` (returns a callable that
+fetches the ``"id"`` attribute).
+
+``itertools.starmap(func, iter)`` assumes that the iterable will
+return a stream of tuples, and calls ``f()`` using these tuples as the
+arguments::
+
+ itertools.starmap(os.path.join,
+ [('/usr', 'bin', 'java'), ('/bin', 'python'),
+ ('/usr', 'bin', 'perl'),('/usr', 'bin', 'ruby')])
+ =>
+ /usr/bin/java, /bin/python, /usr/bin/perl, /usr/bin/ruby
+
+
+Selecting elements
+''''''''''''''''''
+
+Another group of functions chooses a subset of an iterator's elements
+based on a predicate.
+
+``itertools.ifilter(predicate, iter)`` returns all the elements for
+which the predicate returns true::
+
+ def is_even(x):
+     return (x % 2) == 0
+
+ itertools.ifilter(is_even, itertools.count()) =>
+ 0, 2, 4, 6, 8, 10, 12, 14, ...
+
+``itertools.ifilterfalse(predicate, iter)`` is the opposite,
+returning all elements for which the predicate returns false::
+
+ itertools.ifilterfalse(is_even, itertools.count()) =>
+ 1, 3, 5, 7, 9, 11, 13, 15, ...
+
+``itertools.takewhile(predicate, iter)`` returns elements for as long
+as the predicate returns true. Once the predicate returns false,
+the iterator will signal the end of its results.
+
+::
+
+ def less_than_10(x):
+     return (x < 10)
+
+ itertools.takewhile(less_than_10, itertools.count()) =>
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
+
+ itertools.takewhile(is_even, itertools.count()) =>
+ 0
+
+``itertools.dropwhile(predicate, iter)`` discards elements while the
+predicate returns true, and then returns the rest of the iterable's
+results.
+
+::
+
+ itertools.dropwhile(less_than_10, itertools.count()) =>
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, ...
+
+ itertools.dropwhile(is_even, itertools.count()) =>
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...
+
+
+Grouping elements
+'''''''''''''''''
+
+The last function I'll discuss, ``itertools.groupby(iter,
+key_func=None)``, is the most complicated. ``key_func(elem)`` is a
+function that can compute a key value for each element returned by the
+iterable. If you don't supply a key function, the key is simply each
+element itself.
+
+``groupby()`` collects all the consecutive elements from the
+underlying iterable that have the same key value, and returns a stream
+of 2-tuples containing a key value and an iterator for the elements
+with that key.
+
+::
+
+ city_list = [('Decatur', 'AL'), ('Huntsville', 'AL'), ('Selma', 'AL'),
+ ('Anchorage', 'AK'), ('Nome', 'AK'),
+ ('Flagstaff', 'AZ'), ('Phoenix', 'AZ'), ('Tucson', 'AZ'),
+ ...
+ ]
+
+ def get_state ((city, state)):
+     return state
+
+ itertools.groupby(city_list, get_state) =>
+ ('AL', iterator-1),
+ ('AK', iterator-2),
+ ('AZ', iterator-3), ...
+
+ where
+ iterator-1 =>
+ ('Decatur', 'AL'), ('Huntsville', 'AL'), ('Selma', 'AL')
+ iterator-2 =>
+ ('Anchorage', 'AK'), ('Nome', 'AK')
+ iterator-3 =>
+ ('Flagstaff', 'AZ'), ('Phoenix', 'AZ'), ('Tucson', 'AZ')
+
+``groupby()`` assumes that the underlying iterable's contents will
+already be sorted based on the key. Note that the returned iterators
+also use the underlying iterable, so you have to consume the results
+of iterator-1 before requesting iterator-2 and its corresponding key.
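+
+Here's a small concrete sketch, using plain integers as both the
+elements and the key values; note how ``list()`` consumes each group
+before the loop moves on, and how the unsorted trailing 1 ends up in
+a group of its own::
+
+ >>> import itertools
+ >>> data = [1, 1, 2, 2, 2, 3, 1]
+ >>> for key, group in itertools.groupby(data):
+ ...     print key, list(group)
+ 1 [1, 1]
+ 2 [2, 2, 2]
+ 3 [3]
+ 1 [1]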
+
+
+The functools module
+----------------------------------------------
+
+The ``functools`` module in Python 2.5 contains some higher-order
+functions. A **higher-order function** takes one or more functions as
+input and returns a new function. The most useful tool in this module
+is the ``partial()`` function.
+
+For programs written in a functional style, you'll sometimes want to
+construct variants of existing functions that have some of the
+parameters filled in. Consider a Python function ``f(a, b, c)``; you
+may wish to create a new function ``g(b, c)`` that's equivalent to
+``f(1, b, c)``; you're filling in a value for one of ``f()``'s parameters.
+This is called "partial function application".
+
+The constructor for ``partial`` takes the arguments ``(function, arg1,
+arg2, ... kwarg1=value1, kwarg2=value2)``. The resulting object is
+callable, so you can just call it to invoke ``function`` with the
+filled-in arguments.
+
+Here's a small but realistic example::
+
+ import functools
+
+ def log (message, subsystem):
+     "Write the contents of 'message' to the specified subsystem."
+     print '%s: %s' % (subsystem, message)
+     ...
+
+ server_log = functools.partial(log, subsystem='server')
+ server_log('Unable to open socket')
+
+
+The operator module
+-------------------
+
+The ``operator`` module was mentioned earlier. It contains a set of
+functions corresponding to Python's operators. These functions
+are often useful in functional-style code because they save you
+from writing trivial functions that perform a single operation.
+
+Some of the functions in this module are:
+
+* Math operations: ``add()``, ``sub()``, ``mul()``, ``div()``, ``floordiv()``,
+ ``abs()``, ...
+* Logical operations: ``not_()``, ``truth()``.
+* Bitwise operations: ``and_()``, ``or_()``, ``invert()``.
+* Comparisons: ``eq()``, ``ne()``, ``lt()``, ``le()``, ``gt()``, and ``ge()``.
+* Object identity: ``is_()``, ``is_not()``.
+
+Consult `the operator module's documentation <http://docs.python.org/lib/module-operator.html>`__ for a complete
+list.
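+
+Here's a small sketch of how these functions come in handy in place of
+one-off ``lambda`` functions (the data values are just for
+illustration)::
+
+ import operator
+
+ # Instead of lambda a, b: a * b
+ reduce(operator.mul, [1, 2, 3, 4]) =>
+ 24
+
+ # Instead of lambda t: t[1]
+ sorted([('x', 3), ('y', 1)], key=operator.itemgetter(1)) =>
+ [('y', 1), ('x', 3)]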
+
+
+
+The functional module
+---------------------
+
+Collin Winter's `functional module <http://oakwinter.com/code/functional/>`__
+provides a number of more
+advanced tools for functional programming. It also reimplements
+several Python built-ins, trying to make them more intuitive to those
+used to functional programming in other languages.
+
+This section contains an introduction to some of the most important
+functions in ``functional``; full documentation can be found at `the
+project's website <http://oakwinter.com/code/functional/documentation/>`__.
+
+``compose(outer, inner, unpack=False)``
+
+The ``compose()`` function implements function composition.
+In other words, it returns a wrapper around the ``outer`` and ``inner`` callables, such
+that the return value from ``inner`` is fed directly to ``outer``. That is,
+
+::
+
+ >>> def add(a, b):
+ ...     return a + b
+ ...
+ >>> def double(a):
+ ...     return 2 * a
+ ...
+ >>> compose(double, add)(5, 6)
+ 22
+
+is equivalent to
+
+::
+
+ >>> double(add(5, 6))
+ 22
+
+The ``unpack`` keyword is provided to work around the fact that Python functions are not always
+`fully curried <http://en.wikipedia.org/wiki/Currying>`__.
+By default, it is expected that the ``inner`` function will return a single object and that the ``outer``
+function will take a single argument. Setting the ``unpack`` argument causes ``compose`` to expect a
+tuple from ``inner`` which will be expanded before being passed to ``outer``. Put simply,
+
+::
+
+ compose(f, g)(5, 6)
+
+is equivalent to::
+
+ f(g(5, 6))
+
+while
+
+::
+
+ compose(f, g, unpack=True)(5, 6)
+
+is equivalent to::
+
+ f(*g(5, 6))
+
+Even though ``compose()`` only accepts two functions, it's trivial to
+build up a version that will compose any number of functions. We'll
+use ``reduce()``, ``compose()`` and ``partial()`` (the last of which
+is provided by both ``functional`` and ``functools``).
+
+::
+
+ from functional import compose, partial
+
+ multi_compose = partial(reduce, compose)
+
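+For instance, reusing the ``add()`` and ``double()`` functions from the
+``compose()`` example above, the composed pipeline can be called like
+this (a quick sketch, assuming ``multi_compose`` as defined above)::
+
+ >>> multi_compose([str, double, add])(5, 6)
+ '22'
+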
+
+We can also use ``map()``, ``compose()`` and ``partial()`` to craft a
+version of ``"".join(...)`` that converts its arguments to string::
+
+ from functional import compose, partial
+
+ join = compose("".join, partial(map, str))
+
+
+``flip(func)``
+
+``flip()`` wraps the callable in ``func`` and
+causes it to receive its non-keyword arguments in reverse order.
+
+::
+
+ >>> def triple(a, b, c):
+ ...     return (a, b, c)
+ ...
+ >>> triple(5, 6, 7)
+ (5, 6, 7)
+ >>>
+ >>> flipped_triple = flip(triple)
+ >>> flipped_triple(5, 6, 7)
+ (7, 6, 5)
+
+``foldl(func, start, iterable)``
+
+``foldl()`` takes a binary function, a starting value (usually some kind of 'zero'), and an iterable.
+The function is applied to the starting value and the first element of the list, then the result of
+that and the second element of the list, then the result of that and the third element of the list,
+and so on.
+
+This means that a call such as::
+
+ foldl(f, 0, [1, 2, 3])
+
+is equivalent to::
+
+ f(f(f(0, 1), 2), 3)
+
+
+``foldl()`` is roughly equivalent to the following recursive function::
+
+ def foldl(func, start, seq):
+     if len(seq) == 0:
+         return start
+
+     return foldl(func, func(start, seq[0]), seq[1:])
+
+Speaking of equivalence, the above ``foldl`` call can be expressed in terms of the built-in ``reduce`` like
+so::
+
+ reduce(f, [1, 2, 3], 0)
+
+
+We can use ``foldl()``, ``operator.concat()`` and ``partial()`` to
+write a cleaner, more aesthetically pleasing version of Python's
+``"".join(...)`` idiom::
+
+ from functional import foldl, partial
+ from operator import concat
+
+ join = partial(foldl, concat, "")
+
+
+Revision History and Acknowledgements
+------------------------------------------------
+
+The author would like to thank the following people for offering
+suggestions, corrections and assistance with various drafts of this
+article: Ian Bicking, Nick Coghlan, Nick Efford, Raymond Hettinger,
+Jim Jewett, Mike Krell, Leandro Lameiro, Jussi Salmela,
+Collin Winter, Blake Winton.
+
+Version 0.1: posted June 30 2006.
+
+Version 0.11: posted July 1 2006. Typo fixes.
+
+Version 0.2: posted July 10 2006. Merged genexp and listcomp
+sections into one. Typo fixes.
+
+Version 0.21: Added more references suggested on the tutor mailing list.
+
+Version 0.30: Adds a section on the ``functional`` module written by
+Collin Winter; adds short section on the operator module; a few other
+edits.
+
+
+References
+--------------------
+
+General
+'''''''''''''''
+
+**Structure and Interpretation of Computer Programs**, by
+Harold Abelson and Gerald Jay Sussman with Julie Sussman.
+Full text at http://mitpress.mit.edu/sicp/.
+In this classic textbook of computer science, chapters 2 and 3 discuss the
+use of sequences and streams to organize the data flow inside a
+program. The book uses Scheme for its examples, but many of the
+design approaches described in these chapters are applicable to
+functional-style Python code.
+
+http://www.defmacro.org/ramblings/fp.html: A general
+introduction to functional programming that uses Java examples
+and has a lengthy historical introduction.
+
+http://en.wikipedia.org/wiki/Functional_programming:
+General Wikipedia entry describing functional programming.
+
+http://en.wikipedia.org/wiki/Coroutine:
+Entry for coroutines.
+
+http://en.wikipedia.org/wiki/Currying:
+Entry for the concept of currying.
+
+Python-specific
+'''''''''''''''''''''''''''
+
+http://gnosis.cx/TPiP/:
+The first chapter of David Mertz's book :title-reference:`Text Processing in Python`
+discusses functional programming for text processing, in the section titled
+"Utilizing Higher-Order Functions in Text Processing".
+
+Mertz also wrote a 3-part series of articles on functional programming
+for IBM's DeveloperWorks site; see
+`part 1 <http://www-128.ibm.com/developerworks/library/l-prog.html>`__,
+`part 2 <http://www-128.ibm.com/developerworks/library/l-prog2.html>`__, and
+`part 3 <http://www-128.ibm.com/developerworks/linux/library/l-prog3.html>`__,
+
+
+Python documentation
+'''''''''''''''''''''''''''
+
+http://docs.python.org/lib/module-itertools.html:
+Documentation for the ``itertools`` module.
+
+http://docs.python.org/lib/module-operator.html:
+Documentation for the ``operator`` module.
+
+http://www.python.org/dev/peps/pep-0289/:
+PEP 289: "Generator Expressions"
+
+http://www.python.org/dev/peps/pep-0342/:
+PEP 342: "Coroutines via Enhanced Generators" describes the new generator
+features in Python 2.5.
+
+.. comment
+
+ Topics to place
+ -----------------------------
+
+ XXX os.walk()
+
+ XXX Need a large example.
+
+ But will an example add much? I'll post a first draft and see
+ what the comments say.
+
+.. comment
+
+ Original outline:
+ Introduction
+ Idea of FP
+ Programs built out of functions
+ Functions are strictly input-output, no internal state
+ Opposed to OO programming, where objects have state
+
+ Why FP?
+ Formal provability
+ Assignment is difficult to reason about
+ Not very relevant to Python
+ Modularity
+ Small functions that do one thing
+ Debuggability:
+ Easy to test due to lack of state
+ Easy to verify output from intermediate steps
+ Composability
+ You assemble a toolbox of functions that can be mixed
+
+ Tackling a problem
+ Need a significant example
+
+ Iterators
+ Generators
+ The itertools module
+ List comprehensions
+ Small functions and the lambda statement
+ Built-in functions
+ map
+ filter
+ reduce
+
+.. comment
+
+ Handy little function for printing part of an iterator -- used
+ while writing this document.
+
+    import sys
+    import itertools
+
+    def print_iter(it):
+        # Print the first 10 elements of the iterator, separated by commas.
+        elems = list(itertools.islice(it, 10))
+        for elem in elems[:-1]:
+            sys.stdout.write(str(elem))
+            sys.stdout.write(', ')
+        if elems:
+            print elems[-1]
+
+
diff --git a/Doc/howto/regex.tex b/Doc/howto/regex.tex
index f9867ae..3c63b3a 100644
--- a/Doc/howto/regex.tex
+++ b/Doc/howto/regex.tex
@@ -367,7 +367,7 @@ included with the Python distribution. It allows you to enter REs and
strings, and displays whether the RE matches or fails.
\file{redemo.py} can be quite useful when trying to debug a
complicated RE. Phil Schwartz's
-\ulink{Kodos}{http://kodos.sourceforge.net} is also an interactive
+\ulink{Kodos}{http://www.phil-schwartz.com/kodos.spy} is also an interactive
tool for developing and testing RE patterns. This HOWTO will use the
standard Python interpreter for its examples.
diff --git a/Doc/inst/inst.tex b/Doc/inst/inst.tex
index df7c656..6db22ac 100644
--- a/Doc/inst/inst.tex
+++ b/Doc/inst/inst.tex
@@ -632,7 +632,7 @@ Note that these two are \emph{not} equivalent if you supply a different
installation base directory when you run the setup script. For example,
\begin{verbatim}
-python setup.py --install-base=/tmp
+python setup.py install --install-base=/tmp
\end{verbatim}
would install pure modules to \filevar{/tmp/python/lib} in the first
diff --git a/Doc/lib/libasyncore.tex b/Doc/lib/libasyncore.tex
index 4425da7..2067839 100644
--- a/Doc/lib/libasyncore.tex
+++ b/Doc/lib/libasyncore.tex
@@ -198,9 +198,11 @@ Most of these are nearly identical to their socket partners.
\end{methoddesc}
\begin{methoddesc}{bind}{address}
- Bind the socket to \var{address}. The socket must not already
- be bound. (The format of \var{address} depends on the address
- family --- see above.)
+ Bind the socket to \var{address}. The socket must not already be
+ bound. (The format of \var{address} depends on the address family
+ --- see above.) To mark the socket as re-usable (setting the
+ \constant{SO_REUSEADDR} option), call the \class{dispatcher}
+ object's \method{set_reuse_addr()} method.
\end{methoddesc}
\begin{methoddesc}{accept}{}
diff --git a/Doc/lib/libatexit.tex b/Doc/lib/libatexit.tex
index 33dc7dd..9798b57 100644
--- a/Doc/lib/libatexit.tex
+++ b/Doc/lib/libatexit.tex
@@ -44,6 +44,10 @@ If an exception is raised during execution of the exit handlers, a
traceback is printed (unless \exception{SystemExit} is raised) and the
exception information is saved. After all exit handlers have had a
chance to run the last exception to be raised is re-raised.
+
+\versionchanged[This function now returns \var{func}, which makes it
+ possible to use it as a decorator without binding the
+ original name to \code{None}]{2.6}
\end{funcdesc}
@@ -92,3 +96,15 @@ atexit.register(goodbye, 'Donny', 'nice')
# or:
atexit.register(goodbye, adjective='nice', name='Donny')
\end{verbatim}
+
+Usage as a decorator:
+
+\begin{verbatim}
+import atexit
+
+@atexit.register
+def goodbye():
+ print "You are now leaving the Python sector."
+\end{verbatim}
+
+This only works with functions that can be called without arguments.
diff --git a/Doc/lib/libbase64.tex b/Doc/lib/libbase64.tex
index 0039c84..d7eccbd 100644
--- a/Doc/lib/libbase64.tex
+++ b/Doc/lib/libbase64.tex
@@ -21,7 +21,7 @@ three alphabets. The legacy interface provides for encoding and
decoding to and from file-like objects as well as strings, but only
using the Base64 standard alphabet.
-The modern interface provides:
+The modern interface, which was introduced in Python 2.4, provides:
\begin{funcdesc}{b64encode}{s\optional{, altchars}}
Encode a string using Base64.
diff --git a/Doc/lib/libbsddb.tex b/Doc/lib/libbsddb.tex
index 44b9168..85ea824 100644
--- a/Doc/lib/libbsddb.tex
+++ b/Doc/lib/libbsddb.tex
@@ -19,21 +19,23 @@ The \module{bsddb} module requires a Berkeley DB library version from
3.3 thru 4.4.
\begin{seealso}
- \seeurl{http://pybsddb.sourceforge.net/}{The website with documentation
- for the \module{bsddb.db} python Berkeley DB interface that closely mirrors
- the Sleepycat object oriented interface provided in Berkeley DB 3 and 4.}
- \seeurl{http://www.sleepycat.com/}{Sleepycat Software produces the
- Berkeley DB library.}
+ \seeurl{http://pybsddb.sourceforge.net/}
+ {The website with documentation for the \module{bsddb.db}
+ Python Berkeley DB interface that closely mirrors the object
+ oriented interface provided in Berkeley DB 3 and 4.}
+
+ \seeurl{http://www.oracle.com/database/berkeley-db/}
+ {The Berkeley DB library.}
\end{seealso}
A more modern DB, DBEnv and DBSequence object interface is available in the
-\module{bsddb.db} module which closely matches the Sleepycat Berkeley DB C API
+\module{bsddb.db} module which closely matches the Berkeley DB C API
documented at the above URLs. Additional features provided by the
\module{bsddb.db} API include fine tuning, transactions, logging, and
multiprocess concurrent database access.
The following is a description of the legacy \module{bsddb} interface
-compatible with the old python bsddb module. Starting in Python 2.5 this
+compatible with the old Python bsddb module. Starting in Python 2.5 this
interface should be safe for multithreaded access. The \module{bsddb.db}
API is recommended for threading users as it provides better control.
diff --git a/Doc/lib/libcfgparser.tex b/Doc/lib/libcfgparser.tex
index 42a362e..2c08ec4 100644
--- a/Doc/lib/libcfgparser.tex
+++ b/Doc/lib/libcfgparser.tex
@@ -48,11 +48,20 @@ Default values can be specified by passing them into the
may be passed into the \method{get()} method which will override all
others.
-\begin{classdesc}{RawConfigParser}{\optional{defaults}}
+Sections are normally stored in a builtin dictionary. An alternative
+dictionary type can be passed to the \class{ConfigParser} constructor.
+For example, if a dictionary type is passed that sorts its keys,
+the sections will be sorted on write-back, as will the keys within
+each section.
+
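+For example (a minimal sketch, assuming a hypothetical sorted mapping
+type \class{SortedDict}):
+
+\begin{verbatim}
+import ConfigParser
+
+# SortedDict is a hypothetical dict-like class that keeps its keys sorted.
+parser = ConfigParser.RawConfigParser(dict_type=SortedDict)
+\end{verbatim}
+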
+\begin{classdesc}{RawConfigParser}{\optional{defaults\optional{, dict_type}}}
The basic configuration object. When \var{defaults} is given, it is
-initialized into the dictionary of intrinsic defaults. This class
-does not support the magical interpolation behavior.
+initialized into the dictionary of intrinsic defaults. When \var{dict_type}
+is given, it will be used to create the dictionary objects for the list
+of sections, for the options within a section, and for the default values.
+This class does not support the magical interpolation behavior.
\versionadded{2.3}
+\versionchanged[\var{dict_type} was added]{2.6}
\end{classdesc}
\begin{classdesc}{ConfigParser}{\optional{defaults}}
diff --git a/Doc/lib/libcommands.tex b/Doc/lib/libcommands.tex
index 74e7023..53b8a20 100644
--- a/Doc/lib/libcommands.tex
+++ b/Doc/lib/libcommands.tex
@@ -12,6 +12,11 @@ The \module{commands} module contains wrapper functions for
return any output generated by the command and, optionally, the exit
status.
+The \module{subprocess} module provides more powerful facilities for
+spawning new processes and retrieving their results. Using the
+\module{subprocess} module is preferable to using the \module{commands}
+module.
+
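+For example, a call like \code{commands.getoutput(cmd)} can be written
+with \module{subprocess} roughly as follows (a sketch; unlike
+\function{getoutput()}, it does not strip the trailing newline):
+
+\begin{verbatim}
+from subprocess import Popen, PIPE, STDOUT
+
+# cmd is the shell command string, e.g. "ls -l /bin/ls"
+output = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT).communicate()[0]
+\end{verbatim}
+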
The \module{commands} module defines the following functions:
@@ -51,3 +56,7 @@ Example:
>>> commands.getstatus('/bin/ls')
'-rwxr-xr-x 1 root 13352 Oct 14 1994 /bin/ls'
\end{verbatim}
+
+\begin{seealso}
+ \seemodule{subprocess}{Module for spawning and managing subprocesses.}
+\end{seealso}
diff --git a/Doc/lib/libcsv.tex b/Doc/lib/libcsv.tex
index 8e10ccf..e965e31 100644
--- a/Doc/lib/libcsv.tex
+++ b/Doc/lib/libcsv.tex
@@ -64,9 +64,9 @@ dialect. It may be an instance of a subclass of the \class{Dialect}
class or one of the strings returned by the \function{list_dialects}
function. The other optional {}\var{fmtparam} keyword arguments can be
given to override individual formatting parameters in the current
-dialect. For more information about the dialect and formatting
+dialect. For full details about the dialect and formatting
parameters, see section~\ref{csv-fmt-params}, ``Dialects and Formatting
-Parameters'' for details of these parameters.
+Parameters''.
All data read are returned as strings. No automatic data type
conversion is performed.
@@ -96,10 +96,10 @@ parameters specific to a particular CSV dialect. It may be an instance
of a subclass of the \class{Dialect} class or one of the strings
returned by the \function{list_dialects} function. The other optional
{}\var{fmtparam} keyword arguments can be given to override individual
-formatting parameters in the current dialect. For more information
+formatting parameters in the current dialect. For full details
about the dialect and formatting parameters, see
-section~\ref{csv-fmt-params}, ``Dialects and Formatting Parameters'' for
-details of these parameters. To make it as easy as possible to
+section~\ref{csv-fmt-params}, ``Dialects and Formatting Parameters''.
+To make it as easy as possible to
interface with modules which implement the DB API, the value
\constant{None} is written as the empty string. While this isn't a
reversible transformation, it makes it easier to dump SQL NULL data values
@@ -113,9 +113,8 @@ Associate \var{dialect} with \var{name}. \var{name} must be a string
or Unicode object. The dialect can be specified either by passing a
sub-class of \class{Dialect}, or by \var{fmtparam} keyword arguments,
or both, with keyword arguments overriding parameters of the dialect.
-For more information about the dialect and formatting parameters, see
-section~\ref{csv-fmt-params}, ``Dialects and Formatting Parameters''
-for details of these parameters.
+For full details about the dialect and formatting parameters, see
+section~\ref{csv-fmt-params}, ``Dialects and Formatting Parameters''.
\end{funcdesc}
\begin{funcdesc}{unregister_dialect}{name}
@@ -197,12 +196,13 @@ attributes, which are used to define the parameters for a specific
\begin{classdesc}{excel}{}
The \class{excel} class defines the usual properties of an Excel-generated
-CSV file.
+CSV file. It is registered with the dialect name \code{'excel'}.
\end{classdesc}
\begin{classdesc}{excel_tab}{}
The \class{excel_tab} class defines the usual properties of an
-Excel-generated TAB-delimited file.
+Excel-generated TAB-delimited file. It is registered with the dialect name
+\code{'excel-tab'}.
\end{classdesc}
\begin{classdesc}{Sniffer}{}
@@ -345,6 +345,7 @@ A read-only description of the dialect in use by the parser.
\begin{memberdesc}[csv reader]{line_num}
The number of lines read from the source iterator. This is not the same
as the number of records returned, as records can span multiple lines.
+ \versionadded{2.5}
\end{memberdesc}
diff --git a/Doc/lib/libctypes.tex b/Doc/lib/libctypes.tex
index b2e488a..c0e2310 100755
--- a/Doc/lib/libctypes.tex
+++ b/Doc/lib/libctypes.tex
@@ -1821,7 +1821,7 @@ Here is the wrapping with \code{ctypes}:
\begin{quote}
\begin{verbatim}>>> from ctypes import c_int, WINFUNCTYPE, windll
>>> from ctypes.wintypes import HWND, LPCSTR, UINT
->>> prototype = WINFUNCTYPE(c_int, HWND, LPCSTR, LPCSTR, c_uint)
+>>> prototype = WINFUNCTYPE(c_int, HWND, LPCSTR, LPCSTR, UINT)
>>> paramflags = (1, "hwnd", 0), (1, "text", "Hi"), (1, "caption", None), (1, "flags", 0)
>>> MessageBox = prototype(("MessageBoxA", windll.user32), paramflags)
>>>\end{verbatim}
@@ -1848,7 +1848,7 @@ GetWindowRect(
Here is the wrapping with \code{ctypes}:
\begin{quote}
-\begin{verbatim}>>> from ctypes import POINTER, WINFUNCTYPE, windll
+\begin{verbatim}>>> from ctypes import POINTER, WINFUNCTYPE, windll, WinError
>>> from ctypes.wintypes import BOOL, HWND, RECT
>>> prototype = WINFUNCTYPE(BOOL, HWND, POINTER(RECT))
>>> paramflags = (1, "hwnd"), (2, "lprect")
@@ -2299,12 +2299,10 @@ Windows only: Represents a \class{HRESULT} value, which contains success
or error information for a function or method call.
\end{classdesc*}
-\code{py{\_}object} : classdesc*
-\begin{quote}
-
-Represents the C \code{PyObject *} datatype. Calling this with an
-without an argument creates a \code{NULL} \code{PyObject *} pointer.
-\end{quote}
+\begin{classdesc*}{py_object}
+Represents the C \code{PyObject *} datatype. Calling this without an
+argument creates a \code{NULL} \code{PyObject *} pointer.
+\end{classdesc*}
The \code{ctypes.wintypes} module provides many other Windows-specific
data types, for example \code{HWND}, \code{WPARAM}, or \code{DWORD}.
@@ -2440,5 +2438,6 @@ attributes for names not present in \member{{\_}fields{\_}}.
\subsubsection{Arrays and pointers\label{ctypes-arrays-pointers}}
-XXX
+Not yet written - please see section~\ref{ctypes-pointers}, pointers and
+section~\ref{ctypes-arrays}, arrays in the tutorial.
diff --git a/Doc/lib/libdatetime.tex b/Doc/lib/libdatetime.tex
index cae5d60..0d2b5bb 100644
--- a/Doc/lib/libdatetime.tex
+++ b/Doc/lib/libdatetime.tex
@@ -1421,19 +1421,21 @@ The exact range of years for which \method{strftime()} works also
varies across platforms. Regardless of platform, years before 1900
cannot be used.
-\subsection{Examples}
-
-\subsubsection{Creating Datetime Objects from Formatted Strings}
-
-The \class{datetime} class does not directly support parsing formatted time
-strings. You can use \function{time.strptime} to do the parsing and create
-a \class{datetime} object from the tuple it returns:
-
-\begin{verbatim}
->>> s = "2005-12-06T12:13:14"
->>> from datetime import datetime
->>> from time import strptime
->>> datetime(*strptime(s, "%Y-%m-%dT%H:%M:%S")[0:6])
-datetime.datetime(2005, 12, 6, 12, 13, 14)
-\end{verbatim}
-
+%%% This example is obsolete, since strptime is now supported by datetime.
+%
+% \subsection{Examples}
+%
+% \subsubsection{Creating Datetime Objects from Formatted Strings}
+%
+% The \class{datetime} class does not directly support parsing formatted time
+% strings. You can use \function{time.strptime} to do the parsing and create
+% a \class{datetime} object from the tuple it returns:
+%
+% \begin{verbatim}
+% >>> s = "2005-12-06T12:13:14"
+% >>> from datetime import datetime
+% >>> from time import strptime
+% >>> datetime(*strptime(s, "%Y-%m-%dT%H:%M:%S")[0:6])
+% datetime.datetime(2005, 12, 6, 12, 13, 14)
+% \end{verbatim}
+%
diff --git a/Doc/lib/libdecimal.tex b/Doc/lib/libdecimal.tex
index a0c7bde..127eb1d 100644
--- a/Doc/lib/libdecimal.tex
+++ b/Doc/lib/libdecimal.tex
@@ -435,36 +435,37 @@ Each thread has its own current context which is accessed or changed using
the \function{getcontext()} and \function{setcontext()} functions:
\begin{funcdesc}{getcontext}{}
- Return the current context for the active thread.
+ Return the current context for the active thread.
\end{funcdesc}
\begin{funcdesc}{setcontext}{c}
- Set the current context for the active thread to \var{c}.
+ Set the current context for the active thread to \var{c}.
\end{funcdesc}
Beginning with Python 2.5, you can also use the \keyword{with} statement
-to temporarily change the active context. For example the following code
-increases the current decimal precision by 2 places, performs a
-calculation, and then automatically restores the previous context:
-
+and the \function{localcontext()} function to temporarily change the
+active context.
+
+\begin{funcdesc}{localcontext}{\optional{c}}
+ Return a context manager that will set the current context for
+ the active thread to a copy of \var{c} on entry to the with-statement
+ and restore the previous context when exiting the with-statement. If
+ no context is specified, a copy of the current context is used.
+ \versionadded{2.5}
+
+ For example, the following code sets the current decimal precision
+ to 42 places, performs a calculation, and then automatically restores
+ the previous context:
\begin{verbatim}
-from __future__ import with_statement
-import decimal
+ from __future__ import with_statement
+ from decimal import localcontext
-with decimal.getcontext() as ctx:
- ctx.prec += 2 # add 2 more digits of precision
- calculate_something()
+ with localcontext() as ctx:
+ ctx.prec = 42 # Perform a high precision calculation
+ s = calculate_something()
+ s = +s # Round the final result back to the default precision
\end{verbatim}
-
-The context that's active in the body of the \keyword{with} statement is
-a \emph{copy} of the context you provided to the \keyword{with}
-statement, so modifying its attributes doesn't affect anything except
-that temporary copy.
-
-You can use any decimal context in a \keyword{with} statement, but if
-you just want to make a temporary change to some aspect of the current
-context, it's easiest to just use \function{getcontext()} as shown
-above.
+\end{funcdesc}
New contexts can also be created using the \class{Context} constructor
described below. In addition, the module provides three pre-made
diff --git a/Doc/lib/libetree.tex b/Doc/lib/libetree.tex
index 1f29887..ffa1943 100644
--- a/Doc/lib/libetree.tex
+++ b/Doc/lib/libetree.tex
@@ -1,45 +1,34 @@
-\section{\module{elementtree} --- The xml.etree.ElementTree Module}
-\declaremodule{standard}{elementtree}
+\section{\module{xml.etree.ElementTree} --- The ElementTree XML API}
+\declaremodule{standard}{xml.etree.ElementTree}
\moduleauthor{Fredrik Lundh}{fredrik@pythonware.com}
-\modulesynopsis{This module provides implementations
-of the Element and ElementTree types, plus support classes.
+\modulesynopsis{Implementation of the ElementTree API.}
-A C version of this API is available as xml.etree.cElementTree.}
\versionadded{2.5}
-
-\subsection{Overview\label{elementtree-overview}}
-
The Element type is a flexible container object, designed to store
hierarchical data structures in memory. The type can be described as a
cross between a list and a dictionary.
Each element has a number of properties associated with it:
-\begin{itemize}
-\item {}
-a tag which is a string identifying what kind of data
-this element represents (the element type, in other words).
-
-\item {}
-a number of attributes, stored in a Python dictionary.
-
-\item {}
-a text string.
-
-\item {}
-an optional tail string.
-
-\item {}
-a number of child elements, stored in a Python sequence
+\begin{itemize}
+ \item a tag which is a string identifying what kind of data
+ this element represents (the element type, in other words).
+ \item a number of attributes, stored in a Python dictionary.
+ \item a text string.
+ \item an optional tail string.
+  \item a number of child elements, stored in a Python sequence.
\end{itemize}
To create an element instance, use the Element or SubElement factory
functions.
-The ElementTree class can be used to wrap an element
+The \class{ElementTree} class can be used to wrap an element
structure, and convert it from and to XML.
+A C implementation of this API is available as
+\module{xml.etree.cElementTree}.
+
\subsection{Functions\label{elementtree-functions}}
diff --git a/Doc/lib/libfpectl.tex b/Doc/lib/libfpectl.tex
index 814e226..cca2314 100644
--- a/Doc/lib/libfpectl.tex
+++ b/Doc/lib/libfpectl.tex
@@ -7,6 +7,11 @@
\sectionauthor{Lee Busby}{busby1@llnl.gov}
\modulesynopsis{Provide control for floating point exception handling.}
+\note{The \module{fpectl} module is not built by default, and its usage
+ is discouraged and may be dangerous except in the hands of
+ experts. See also the section \ref{fpectl-limitations} on
+ limitations for more details.}
+
Most computers carry out floating point operations\index{IEEE-754}
in conformance with the so-called IEEE-754 standard.
On any real computer,
@@ -95,7 +100,7 @@ FloatingPointError: in math_1
\end{verbatim}
-\subsection{Limitations and other considerations}
+\subsection{Limitations and other considerations \label{fpectl-limitations}}
Setting up a given processor to trap IEEE-754 floating point
errors currently requires custom code on a per-architecture basis.
diff --git a/Doc/lib/libfuncs.tex b/Doc/lib/libfuncs.tex
index 4dde065..a8c06bb 100644
--- a/Doc/lib/libfuncs.tex
+++ b/Doc/lib/libfuncs.tex
@@ -808,7 +808,7 @@ class C:
\begin{verbatim}
class C(object):
- def __init__(self): self.__x = None
+ def __init__(self): self._x = None
def getx(self): return self._x
def setx(self, value): self._x = value
def delx(self): del self._x
diff --git a/Doc/lib/libgetopt.tex b/Doc/lib/libgetopt.tex
index e8b16a3..b38fcd8 100644
--- a/Doc/lib/libgetopt.tex
+++ b/Doc/lib/libgetopt.tex
@@ -126,8 +126,9 @@ import getopt, sys
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:v", ["help", "output="])
- except getopt.GetoptError:
+ except getopt.GetoptError, err:
# print help information and exit:
+ print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
output = None
@@ -135,11 +136,13 @@ def main():
for o, a in opts:
if o == "-v":
verbose = True
- if o in ("-h", "--help"):
+ elif o in ("-h", "--help"):
usage()
sys.exit()
- if o in ("-o", "--output"):
+ elif o in ("-o", "--output"):
output = a
+ else:
+ assert False, "unhandled option"
# ...
if __name__ == "__main__":
diff --git a/Doc/lib/libhashlib.tex b/Doc/lib/libhashlib.tex
index 62e3fc4..17f5179 100644
--- a/Doc/lib/libhashlib.tex
+++ b/Doc/lib/libhashlib.tex
@@ -86,8 +86,8 @@ arguments: \code{m.update(a); m.update(b)} is equivalent to
\begin{methoddesc}[hash]{digest}{}
Return the digest of the strings passed to the \method{update()}
-method so far. This is a 16-byte string which may contain
-non-\ASCII{} characters, including null bytes.
+method so far. This is a string of \member{digest_size} bytes which may
+contain non-\ASCII{} characters, including null bytes.
\end{methoddesc}
\begin{methoddesc}[hash]{hexdigest}{}
diff --git a/Doc/lib/libheapq.tex b/Doc/lib/libheapq.tex
index eaf7051..5f3d8c5 100644
--- a/Doc/lib/libheapq.tex
+++ b/Doc/lib/libheapq.tex
@@ -76,14 +76,14 @@ Example of use:
>>> for item in data:
... heappush(heap, item)
...
->>> sorted = []
+>>> ordered = []
>>> while heap:
-... sorted.append(heappop(heap))
+... ordered.append(heappop(heap))
...
->>> print sorted
+>>> print ordered
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> data.sort()
->>> print data == sorted
+>>> print data == ordered
True
>>>
\end{verbatim}
diff --git a/Doc/lib/libhttplib.tex b/Doc/lib/libhttplib.tex
index 049f6c4..557ee3d 100644
--- a/Doc/lib/libhttplib.tex
+++ b/Doc/lib/libhttplib.tex
@@ -304,9 +304,14 @@ Example: \code{httplib.responses[httplib.NOT_FOUND]} is \code{'Not Found'}.
This will send a request to the server using the HTTP request method
\var{method} and the selector \var{url}. If the \var{body} argument is
present, it should be a string of data to send after the headers are finished.
+Alternatively, it may be an open file object, in which case the
+contents of the file are sent; this file object should support the
+\code{fileno()} and \code{read()} methods.
The header Content-Length is automatically set to the correct value.
The \var{headers} argument should be a mapping of extra HTTP headers to send
with the request.
+
+\versionchanged[\var{body} can be a file object]{2.6}
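+
+For example (a short sketch; the host, path and file name are hypothetical):
+
+\begin{verbatim}
+import httplib
+
+conn = httplib.HTTPConnection("www.example.com")
+# The file's contents become the request body; Content-Length is set
+# automatically to the correct value.
+body = open("report.txt", "rb")
+conn.request("POST", "/upload", body)
+response = conn.getresponse()
+\end{verbatim}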
\end{methoddesc}
\begin{methoddesc}{getresponse}{}
diff --git a/Doc/lib/libimp.tex b/Doc/lib/libimp.tex
index 598d351..5379309 100644
--- a/Doc/lib/libimp.tex
+++ b/Doc/lib/libimp.tex
@@ -161,10 +161,10 @@ Unused.
\begin{funcdesc}{init_builtin}{name}
Initialize the built-in module called \var{name} and return its module
-object. If the module was already initialized, it will be initialized
-\emph{again}. A few modules cannot be initialized twice --- attempting
-to initialize these again will raise an \exception{ImportError}
-exception. If there is no
+object, storing it in \code{sys.modules} as well. If the module was already
+initialized, it will be initialized \emph{again}. Re-initialization involves
+copying the built-in module's \code{__dict__} from the cached
+module over the module's entry in \code{sys.modules}. If there is no
built-in module called \var{name}, \code{None} is returned.
\end{funcdesc}
@@ -208,14 +208,15 @@ user-defined class emulating a file.
\begin{funcdesc}{load_dynamic}{name, pathname\optional{, file}}
Load and initialize a module implemented as a dynamically loadable
shared library and return its module object. If the module was
-already initialized, it will be initialized \emph{again}. Some modules
-don't like that and may raise an exception. The \var{pathname}
-argument must point to the shared library. The \var{name} argument is
-used to construct the name of the initialization function: an external
-C function called \samp{init\var{name}()} in the shared library is
-called. The optional \var{file} argument is ignored. (Note: using
-shared libraries is highly system dependent, and not all systems
-support it.)
+already initialized, it will be initialized \emph{again}.
+Re-initialization involves copying the \code{__dict__} attribute of the
+cached instance of the module over the module's entry in
+\code{sys.modules}. The \var{pathname} argument must point to the shared
+library. The \var{name} argument is used to construct the name of the
+initialization function: an external C function called
+\samp{init\var{name}()} in the shared library is called. The optional
+\var{file} argument is ignored. (Note: using shared libraries is highly
+system dependent, and not all systems support it.)
\end{funcdesc}
\begin{funcdesc}{load_source}{name, pathname\optional{, file}}
diff --git a/Doc/lib/libitertools.tex b/Doc/lib/libitertools.tex
index 20bbc8d..59fbd98 100644
--- a/Doc/lib/libitertools.tex
+++ b/Doc/lib/libitertools.tex
@@ -474,8 +474,8 @@ def iteritems(mapping):
return izip(mapping.iterkeys(), mapping.itervalues())
def nth(iterable, n):
- "Returns the nth item"
- return list(islice(iterable, n, n+1))
+ "Returns the nth item or raise IndexError"
+ return list(islice(iterable, n, n+1))[0]
def all(seq, pred=None):
"Returns True if pred(x) is true for every element in the iterable"
diff --git a/Doc/lib/liblogging.tex b/Doc/lib/liblogging.tex
index cc44294..e01fe0b 100644
--- a/Doc/lib/liblogging.tex
+++ b/Doc/lib/liblogging.tex
@@ -528,8 +528,8 @@ as those created locally. Logger-level filtering is applied using
\method{filter()}.
\end{methoddesc}
-\begin{methoddesc}{makeRecord}{name, lvl, fn, lno, msg, args, exc_info,
- func, extra}
+\begin{methoddesc}{makeRecord}{name, lvl, fn, lno, msg, args, exc_info
+ \optional{, func, extra}}
This is a factory method which can be overridden in subclasses to create
specialized \class{LogRecord} instances.
\versionchanged[\var{func} and \var{extra} were added]{2.5}
@@ -1397,6 +1397,9 @@ Currently, the useful mapping keys in a \class{LogRecord} are:
(if available).}
\lineii{\%(created)f} {Time when the \class{LogRecord} was created (as
returned by \function{time.time()}).}
+\lineii{\%(relativeCreated)d} {Time in milliseconds when the LogRecord was
+ created, relative to the time the logging module was
+ loaded.}
\lineii{\%(asctime)s} {Human-readable time when the \class{LogRecord}
was created. By default this is of the form
``2003-07-08 16:49:45,896'' (the numbers after the
@@ -1479,7 +1482,7 @@ source line where the logging call was made, and any exception
information to be logged.
\begin{classdesc}{LogRecord}{name, lvl, pathname, lineno, msg, args,
- exc_info}
+ exc_info \optional{, func}}
Returns an instance of \class{LogRecord} initialized with interesting
information. The \var{name} is the logger name; \var{lvl} is the
numeric level; \var{pathname} is the absolute pathname of the source
@@ -1489,7 +1492,9 @@ user-supplied message (a format string); \var{args} is the tuple
which, together with \var{msg}, makes up the user message; and
\var{exc_info} is the exception tuple obtained by calling
\function{sys.exc_info() }(or \constant{None}, if no exception information
-is available).
+is available). \var{func} is the name of the function from which the
+logging call was made. If not specified, it defaults to \constant{None}.
+\versionchanged[\var{func} was added]{2.5}
\end{classdesc}
\begin{methoddesc}{getMessage}{}
diff --git a/Doc/lib/libmsilib.tex b/Doc/lib/libmsilib.tex
index 1e044f4..13d5556 100644
--- a/Doc/lib/libmsilib.tex
+++ b/Doc/lib/libmsilib.tex
@@ -344,8 +344,8 @@ the string inside the exception will contain more detail.
\subsection{Features\label{features}}
\begin{classdesc}{Feature}{database, id, title, desc, display\optional{,
- level=1\optional{, parent\optional\{, directory\optional{,
- attributes=0}}}}
+ level=1\optional{, parent\optional{, directory\optional{,
+ attributes=0}}}}}
Add a new record to the \code{Feature} table, using the values
\var{id}, \var{parent.id}, \var{title}, \var{desc}, \var{display},
diff --git a/Doc/lib/libos.tex b/Doc/lib/libos.tex
index 9ded3ae..355d8fa 100644
--- a/Doc/lib/libos.tex
+++ b/Doc/lib/libos.tex
@@ -361,6 +361,10 @@ object, except that when the exit status is zero (termination without
errors), \code{None} is returned.
Availability: Macintosh, \UNIX, Windows.
+The \module{subprocess} module provides more powerful facilities for
+spawning new processes and retrieving their results; using that module
+is preferable to using this function.
+
\versionchanged[This function worked unreliably under Windows in
earlier versions of Python. This was due to the use of the
\cfunction{_popen()} function from the libraries provided with
@@ -375,8 +379,13 @@ deleted once there are no file descriptors for the file.
Availability: Macintosh, \UNIX, Windows.
\end{funcdesc}
+There are a number of different \function{popen*()} functions that
+provide slightly different ways to create subprocesses. Note that the
+\module{subprocess} module is easier to use and more powerful;
+consider using that module before writing code using the
+lower-level \function{popen*()} functions.
-For each of the following \function{popen()} variants, if \var{bufsize} is
+For each of the \function{popen*()} variants, if \var{bufsize} is
specified, it specifies the buffer size for the I/O pipes.
\var{mode}, if provided, should be the string \code{'b'} or
\code{'t'}; on Windows this is needed to determine whether the file
@@ -920,6 +929,8 @@ Return a string representing the path to which the symbolic link
points. The result may be either an absolute or relative pathname; if
it is relative, it may be converted to an absolute pathname using
\code{os.path.join(os.path.dirname(\var{path}), \var{result})}.
+\versionchanged [If the \var{path} is a Unicode object the result will also
+be a Unicode object]{2.6}
Availability: Macintosh, \UNIX.
\end{funcdesc}
@@ -1545,7 +1556,13 @@ functions are described in section \ref{os-newstreams}.
\funcline{spawnve}{mode, path, args, env}
\funcline{spawnvp}{mode, file, args}
\funcline{spawnvpe}{mode, file, args, env}
-Execute the program \var{path} in a new process. If \var{mode} is
+Execute the program \var{path} in a new process.
+
+(Note that the \module{subprocess} module provides more powerful
+facilities for spawning new processes and retrieving their results;
+using that module is preferable to using these functions.)
+
+If \var{mode} is
\constant{P_NOWAIT}, this function returns the process ID of the new
process; if \var{mode} is \constant{P_WAIT}, returns the process's
exit code if it exits normally, or \code{-\var{signal}}, where
@@ -1682,6 +1699,10 @@ and XP) this is the exit status of the command run; on systems using
a non-native shell, consult your shell documentation.
Availability: Macintosh, \UNIX, Windows.
+
+The \module{subprocess} module provides more powerful facilities for
+spawning new processes and retrieving their results; using that module
+is preferable to using this function.
\end{funcdesc}
\begin{funcdesc}{times}{}
diff --git a/Doc/lib/libpickle.tex b/Doc/lib/libpickle.tex
index a8ab39e..3290641 100644
--- a/Doc/lib/libpickle.tex
+++ b/Doc/lib/libpickle.tex
@@ -519,7 +519,7 @@ as their value. The semantics of each element are:
version of the object. The next element of the tuple will provide
arguments for this callable, and later elements provide additional
state information that will subsequently be used to fully reconstruct
-the pickled date.
+the pickled data.
In the unpickling environment this object must be either a class, a
callable registered as a ``safe constructor'' (see below), or it must
diff --git a/Doc/lib/libpopen2.tex b/Doc/lib/libpopen2.tex
index 985f580..fa0c1a6 100644
--- a/Doc/lib/libpopen2.tex
+++ b/Doc/lib/libpopen2.tex
@@ -11,10 +11,10 @@ This module allows you to spawn processes and connect to their
input/output/error pipes and obtain their return codes under
\UNIX{} and Windows.
-Note that starting with Python 2.0, this functionality is available
-using functions from the \refmodule{os} module which have the same
-names as the factory functions here, but the order of the return
-values is more intuitive in the \refmodule{os} module variants.
+The \module{subprocess} module provides more powerful facilities for
+spawning new processes and retrieving their results. Using the
+\module{subprocess} module is preferable to using the \module{popen2}
+module.
The primary interface offered by this module is a trio of factory
functions. For each of these, if \var{bufsize} is specified,
@@ -184,3 +184,7 @@ integrate I/O over pipes with their \function{select()} loops, or use
separate threads to read each of the individual files provided by
whichever \function{popen*()} function or \class{Popen*} class was
used.
+
+\begin{seealso}
+ \seemodule{subprocess}{Module for spawning and managing subprocesses.}
+\end{seealso}
diff --git a/Doc/lib/libpyexpat.tex b/Doc/lib/libpyexpat.tex
index 83581ec..a0ea8a1 100644
--- a/Doc/lib/libpyexpat.tex
+++ b/Doc/lib/libpyexpat.tex
@@ -216,9 +216,10 @@ any time.
\begin{memberdesc}[xmlparser]{returns_unicode}
If this attribute is set to a non-zero integer, the handler functions
-will be passed Unicode strings. If \member{returns_unicode} is 0,
-8-bit strings containing UTF-8 encoded data will be passed to the
-handlers.
+will be passed Unicode strings. If \member{returns_unicode} is
+\constant{False}, 8-bit strings containing UTF-8 encoded data will be
+passed to the handlers. This is \constant{True} by default when
+Python is built with Unicode support.
\versionchanged[Can be changed at any time to affect the result
type]{1.6}
\end{memberdesc}
diff --git a/Doc/lib/libsmtplib.tex b/Doc/lib/libsmtplib.tex
index ddf1764..962383f 100644
--- a/Doc/lib/libsmtplib.tex
+++ b/Doc/lib/libsmtplib.tex
@@ -28,6 +28,18 @@ For normal use, you should only require the initialization/connect,
included below.
\end{classdesc}
+\begin{classdesc}{SMTP_SSL}{\optional{host\optional{, port\optional{,
+ local_hostname\optional{,
+ keyfile\optional{,
+ certfile}}}}}}
+An \class{SMTP_SSL} instance behaves exactly like an instance of \class{SMTP}.
+\class{SMTP_SSL} should be used for situations where SSL is required from
+the beginning of the connection and using \method{starttls()} is not appropriate.
+If \var{host} is not specified, the local host is used. If \var{port} is
+omitted, the standard SMTP-over-SSL port (465) is used. \var{keyfile} and \var{certfile}
+are also optional, and can contain a PEM formatted private key and
+certificate chain file for the SSL connection.
+\end{classdesc}
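+
+For example (a minimal sketch; the host, account and message are
+hypothetical):
+
+\begin{verbatim}
+import smtplib
+
+server = smtplib.SMTP_SSL("smtp.example.com")
+server.login("user", "password")
+server.sendmail("from@example.com", ["to@example.com"],
+                "Subject: test\n\nHello over SSL")
+server.quit()
+\end{verbatim}
+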
A nice selection of exceptions is defined as well:
diff --git a/Doc/lib/libsocket.tex b/Doc/lib/libsocket.tex
index aa75ec9..f510fd4 100644
--- a/Doc/lib/libsocket.tex
+++ b/Doc/lib/libsocket.tex
@@ -241,8 +241,8 @@ If you want to know the current machine's IP address, you may want to use
This operation assumes that there is a valid address-to-host mapping for
the host, and the assumption does not always hold.
Note: \function{gethostname()} doesn't always return the fully qualified
-domain name; use \code{gethostbyaddr(gethostname())}
-(see below).
+domain name; use \code{getfqdn()}
+(see above).
\end{funcdesc}
\begin{funcdesc}{gethostbyaddr}{ip_address}
@@ -712,14 +712,15 @@ read until EOF. The return value is a string of the bytes read.
\end{methoddesc}
\begin{methoddesc}{server}{}
-Returns a string containing the ASN.1 distinguished name identifying the
-server's certificate. (See below for an example
-showing what distinguished names look like.)
+Returns a string describing the server's certificate.
+Useful for debugging purposes; do not parse the content of this string,
+as its format cannot be interpreted unambiguously.
\end{methoddesc}
\begin{methoddesc}{issuer}{}
-Returns a string containing the ASN.1 distinguished name identifying the
-issuer of the server's certificate.
+Returns a string describing the issuer of the server's certificate.
+Useful for debugging purposes; do not parse the content of this string,
+as its format cannot be interpreted unambiguously.
\end{methoddesc}
\subsection{Example \label{socket-example}}
diff --git a/Doc/lib/libsqlite3.tex b/Doc/lib/libsqlite3.tex
index d87e064..82416fa 100644
--- a/Doc/lib/libsqlite3.tex
+++ b/Doc/lib/libsqlite3.tex
@@ -6,14 +6,16 @@
\sectionauthor{Gerhard Häring}{gh@ghaering.de}
\versionadded{2.5}
-SQLite is a C library that provides a SQL-language database that
-stores data in disk files without requiring a separate server process.
+SQLite is a C library that provides a lightweight disk-based database
+that doesn't require a separate server process and allows accessing
+the database using a nonstandard variant of the SQL query language.
+Some applications can use SQLite for internal data storage. It's also
+possible to prototype an application using SQLite and then port the
+code to a larger database such as PostgreSQL or Oracle.
+
pysqlite was written by Gerhard H\"aring and provides a SQL interface
compliant with the DB-API 2.0 specification described by
-\pep{249}. This means that it should be possible to write the first
-version of your applications using SQLite for data storage. If
-switching to a larger database such as PostgreSQL or Oracle is
-later necessary, the switch should be relatively easy.
+\pep{249}.
To use the module, you must first create a \class{Connection} object
that represents the database. Here the data will be stored in the
@@ -34,8 +36,8 @@ c = conn.cursor()
# Create table
c.execute('''create table stocks
-(date timestamp, trans varchar, symbol varchar,
- qty decimal, price decimal)''')
+(date text, trans text, symbol text,
+ qty real, price real)''')
# Insert a row of data
c.execute("""insert into stocks
@@ -144,11 +146,11 @@ committed. The \var{timeout} parameter specifies how long the connection should
wait for the lock to go away until raising an exception. The default for the
timeout parameter is 5.0 (five seconds).
-For the \var{isolation_level} parameter, please see \member{isolation_level}
-\ref{sqlite3-Connection-IsolationLevel} property of \class{Connection} objects.
+For the \var{isolation_level} parameter, please see the \member{isolation_level}
+property of \class{Connection} objects in section~\ref{sqlite3-Connection-IsolationLevel}.
SQLite natively supports only the types TEXT, INTEGER, FLOAT, BLOB and NULL. If
-you want to use other types, like you have to add support for them yourself.
+you want to use other types you must add support for them yourself.
The \var{detect_types} parameter and using custom \strong{converters} registered with
the module-level \function{register_converter} function allow you to easily do that.
@@ -195,7 +197,7 @@ This can be used to build a shell for SQLite, like in the following example:
\verbatiminput{sqlite3/complete_statement.py}
\end{funcdesc}
-\begin{funcdesc}{}enable_callback_tracebacks{flag}
+\begin{funcdesc}{enable_callback_tracebacks}{flag}
By default you will not get any tracebacks in user-defined functions,
aggregates, converters, authorizer callbacks etc. If you want to debug them,
you can call this function with \var{flag} as True. Afterwards, you will get
@@ -210,13 +212,14 @@ A \class{Connection} instance has the following attributes and methods:
\label{sqlite3-Connection-IsolationLevel}
\begin{memberdesc}{isolation_level}
Get or set the current isolation level. None for autocommit mode or one of
- "DEFERRED", "IMMEDIATE" or "EXLUSIVE". See Controlling Transactions
- \ref{sqlite3-Controlling-Transactions} for a more detailed explanation.
+  "DEFERRED", "IMMEDIATE" or "EXCLUSIVE". See ``Controlling Transactions'',
+ section~\ref{sqlite3-Controlling-Transactions}, for a more detailed explanation.
\end{memberdesc}
\begin{methoddesc}{cursor}{\optional{cursorClass}}
The cursor method accepts a single optional parameter \var{cursorClass}.
- This is a custom cursor class which must extend \class{sqlite3.Cursor}.
+ If supplied, this must be a custom cursor class that extends
+ \class{sqlite3.Cursor}.
\end{methoddesc}
\begin{methoddesc}{execute}{sql, \optional{parameters}}
@@ -242,7 +245,7 @@ parameters given.
Creates a user-defined function that you can later use from within SQL
statements under the function name \var{name}. \var{num_params} is the number
of parameters the function accepts, and \var{func} is a Python callable that is
-called as SQL function.
+called as the SQL function.
The function can return any of the types supported by SQLite: unicode, str,
int, long, float, buffer and None.
@@ -272,7 +275,7 @@ Example:
Creates a collation with the specified \var{name} and \var{callable}. The
callable will be passed two string arguments. It should return -1 if the first
-is ordered lower than the second, 0 if they are ordered equal and 1 and if the
+is ordered lower than the second, 0 if they are ordered equal and 1 if the
first is ordered higher than the second. Note that this controls sorting
(ORDER BY in SQL) so your comparisons don't affect other SQL operations.
@@ -321,20 +324,21 @@ module.
\begin{memberdesc}{row_factory}
You can change this attribute to a callable that accepts the cursor and
- the original row as tuple and will return the real result row. This
- way, you can implement more advanced ways of returning results, like
- ones that can also access columns by name.
+ the original row as a tuple and will return the real result row. This
+ way, you can implement more advanced ways of returning results, such
+ as returning an object that can also access columns by name.
Example:
\verbatiminput{sqlite3/row_factory.py}
- If the standard tuple types don't suffice for you, and you want name-based
+ If returning a tuple doesn't suffice and you want name-based
access to columns, you should consider setting \member{row_factory} to the
- highly-optimized sqlite3.Row type. It provides both
+ highly-optimized \class{sqlite3.Row} type. \class{Row} provides both
index-based and case-insensitive name-based access to columns with almost
- no memory overhead. Much better than your own custom dictionary-based
- approach or even a db_row based solution.
+ no memory overhead. It will probably be better than your own custom
+ dictionary-based approach or even a db_row based solution.
+ % XXX what's a db_row-based solution?
\end{memberdesc}
\begin{memberdesc}{text_factory}
@@ -348,7 +352,7 @@ module.
attribute to \constant{sqlite3.OptimizedUnicode}.
You can also set it to any other callable that accepts a single bytestring
- parameter and returns the result object.
+ parameter and returns the resulting object.
See the following example code for illustration:
@@ -356,7 +360,7 @@ module.
\end{memberdesc}
\begin{memberdesc}{total_changes}
- Returns the total number of database rows that have be modified, inserted,
+ Returns the total number of database rows that have been modified, inserted,
or deleted since the database connection was opened.
\end{memberdesc}
@@ -383,9 +387,9 @@ This example shows how to use the named style:
\verbatiminput{sqlite3/execute_2.py}
- \method{execute} will only execute a single SQL statement. If you try to
+ \method{execute()} will only execute a single SQL statement. If you try to
execute more than one statement with it, it will raise a Warning. Use
- \method{executescript} if want to execute multiple SQL statements with one
+ \method{executescript()} if you want to execute multiple SQL statements with one
call.
\end{methoddesc}
@@ -393,7 +397,7 @@ This example shows how to use the named style:
\begin{methoddesc}{executemany}{sql, seq_of_parameters}
Executes a SQL command against all parameter sequences or mappings found in the
sequence \var{seq_of_parameters}. The \module{sqlite3} module also allows
-to use an iterator yielding parameters instead of a sequence.
+using an iterator yielding parameters instead of a sequence.
\verbatiminput{sqlite3/executemany_1.py}
@@ -405,7 +409,7 @@ Here's a shorter example using a generator:
\begin{methoddesc}{executescript}{sql_script}
This is a nonstandard convenience method for executing multiple SQL statements
-at once. It issues a COMMIT statement before, then executes the SQL script it
+at once. It issues a COMMIT statement first, then executes the SQL script it
gets as a parameter.
\var{sql_script} can be a bytestring or a Unicode string.
@@ -462,20 +466,19 @@ This is how SQLite types are converted to Python types by default:
\lineii{BLOB}{buffer}
\end{tableii}
-The type system of the \module{sqlite3} module is extensible in both ways: you can store
+The type system of the \module{sqlite3} module is extensible in two ways: you can store
additional Python types in a SQLite database via object adaptation, and you can
let the \module{sqlite3} module convert SQLite types to different Python types via
converters.
\subsubsection{Using adapters to store additional Python types in SQLite databases}
-Like described before, SQLite supports only a limited set of types natively. To
+As described before, SQLite supports only a limited set of types natively. To
use other Python types with SQLite, you must \strong{adapt} them to one of the sqlite3
-module's supported types for SQLite. So, one of NoneType, int, long, float,
+module's supported types for SQLite: one of NoneType, int, long, float,
str, unicode, buffer.
-The \module{sqlite3} module uses the Python object adaptation, like described in PEP 246
-for this. The protocol to use is \class{PrepareProtocol}.
+The \module{sqlite3} module uses Python object adaptation for this, as
+described in \pep{246}. The protocol to use is \class{PrepareProtocol}.
There are two ways to enable the \module{sqlite3} module to adapt a custom Python type
to one of the supported ones.
@@ -491,8 +494,8 @@ class Point(object):
self.x, self.y = x, y
\end{verbatim}
-Now you want to store the point in a single SQLite column. You'll have to
-choose one of the supported types first that you use to represent the point in.
+Now you want to store the point in a single SQLite column. First you'll have to
+choose one of the supported types to be used for representing the point.
Let's just use str and separate the coordinates using a semicolon. Then you
need to give your class a method \code{__conform__(self, protocol)} which must
return the converted value. The parameter \var{protocol} will be
@@ -505,13 +508,13 @@ return the converted value. The parameter \var{protocol} will be
The other possibility is to create a function that converts the type to the
string representation and register the function with \method{register_adapter}.
- \verbatiminput{sqlite3/adapter_point_2.py}
-
\begin{notice}
The type/class to adapt must be a new-style class, i. e. it must have
\class{object} as one of its bases.
\end{notice}
+ \verbatiminput{sqlite3/adapter_point_2.py}
+
The \module{sqlite3} module has two default adapters for Python's built-in
\class{datetime.date} and \class{datetime.datetime} types. Now let's suppose
we want to store \class{datetime.datetime} objects not in ISO representation,
@@ -521,16 +524,17 @@ but as a \UNIX{} timestamp.
\subsubsection{Converting SQLite values to custom Python types}
-Now that's all nice and dandy that you can send custom Python types to SQLite.
+Writing an adapter lets you send custom Python types to SQLite.
But to make it really useful we need to make the Python to SQLite to Python
-roundtrip work.
+roundtrip work.
Enter converters.
-Let's go back to the Point class. We stored the x and y coordinates separated
-via semicolons as strings in SQLite.
+Let's go back to the \class{Point} class. We stored the x and y
+coordinates separated via semicolons as strings in SQLite.
-Let's first define a converter function that accepts the string as a parameter and constructs a Point object from it.
+First, we'll define a converter function that accepts the string as a
+parameter and constructs a \class{Point} object from it.
\begin{notice}
Converter functions \strong{always} get called with a string, no matter
@@ -556,11 +560,12 @@ database is actually a point. There are two ways of doing this:
\item Explicitly via the column name
\end{itemize}
-Both ways are described at \ref{sqlite3-Module-Contents} in the text explaining
-the constants \constant{PARSE_DECLTYPES} and \constant{PARSE_COlNAMES}.
+Both ways are described in ``Module Constants'', section~\ref{sqlite3-Module-Contents}, in
+the entries for the constants \constant{PARSE_DECLTYPES} and
+\constant{PARSE_COLNAMES}.
-The following example illustrates both ways.
+The following example illustrates both approaches.
\verbatiminput{sqlite3/converter_point.py}
@@ -569,8 +574,8 @@ The following example illustrates both ways.
There are default adapters for the date and datetime types in the datetime
module. They will be sent as ISO dates/ISO timestamps to SQLite.
-The default converters are registered under the name "date" for datetime.date
-and under the name "timestamp" for datetime.datetime.
+The default converters are registered under the name "date" for \class{datetime.date}
+and under the name "timestamp" for \class{datetime.datetime}.
This way, you can use date/timestamps from Python without any additional
fiddling in most cases. The format of the adapters is also compatible with the
@@ -582,12 +587,12 @@ The following example demonstrates this.
\subsection{Controlling Transactions \label{sqlite3-Controlling-Transactions}}
-By default, the \module{sqlite3} module opens transactions implicitly before a DML
-statement (INSERT/UPDATE/DELETE/REPLACE), and commits transactions implicitly
-before a non-DML, non-DQL statement (i. e. anything other than
+By default, the \module{sqlite3} module opens transactions implicitly before a Data Modification Language (DML)
+statement (i.e. INSERT/UPDATE/DELETE/REPLACE), and commits transactions implicitly
+before a non-DML, non-query statement (i. e. anything other than
SELECT/INSERT/UPDATE/DELETE/REPLACE).
-So if you are within a transaction, and issue a command like \code{CREATE TABLE
+So if you are within a transaction and issue a command like \code{CREATE TABLE
...}, \code{VACUUM}, \code{PRAGMA}, the \module{sqlite3} module will commit implicitly
before executing that command. There are two reasons for doing that. The first
is that some of these commands don't work within transactions. The other reason
@@ -616,17 +621,17 @@ the connection yourself.
Using the nonstandard \method{execute}, \method{executemany} and
\method{executescript} methods of the \class{Connection} object, your code can
-be written more concisely, because you don't have to create the - often
-superfluous \class{Cursor} objects explicitly. Instead, the \class{Cursor}
+be written more concisely because you don't have to create the (often
+superfluous) \class{Cursor} objects explicitly. Instead, the \class{Cursor}
objects are created implicitly and these shortcut methods return the cursor
-objects. This way, you can for example execute a SELECT statement and iterate
+objects. This way, you can execute a SELECT statement and iterate
over it directly using only a single call on the \class{Connection} object.
\verbatiminput{sqlite3/shortcut_methods.py}
\subsubsection{Accessing columns by name instead of by index}
-One cool feature of the \module{sqlite3} module is the builtin \class{sqlite3.Row} class
+One useful feature of the \module{sqlite3} module is the builtin \class{sqlite3.Row} class
designed to be used as a row factory.
Rows wrapped with this class can be accessed both by index (like tuples) and
diff --git a/Doc/lib/libstdtypes.tex b/Doc/lib/libstdtypes.tex
index 17e377b..b433bc4 100644
--- a/Doc/lib/libstdtypes.tex
+++ b/Doc/lib/libstdtypes.tex
@@ -759,8 +759,8 @@ The original string is returned if
Split the string at the last occurrence of \var{sep}, and return
a 3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
-found, return a 3-tuple containing the string itself, followed by
-two empty strings.
+found, return a 3-tuple containing two empty strings, followed by
+the string itself.
\versionadded{2.5}
\end{methoddesc}
@@ -822,7 +822,7 @@ boundaries. Line breaks are not included in the resulting list unless
start\optional{, end}}}
Return \code{True} if string starts with the \var{prefix}, otherwise
return \code{False}. \var{prefix} can also be a tuple of
-suffixes to look for. With optional \var{start}, test string beginning at
+prefixes to look for. With optional \var{start}, test string beginning at
that position. With optional \var{end}, stop comparing string at that
position.
@@ -864,6 +864,9 @@ optional argument \var{deletechars} are removed, and the remaining
characters have been mapped through the given translation table, which
must be a string of length 256.
+You can use the \function{maketrans()} helper function in the
+\refmodule{string} module to create a translation table.
+
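+For example (a brief sketch):
+
+\begin{verbatim}
+>>> import string
+>>> table = string.maketrans('abc', 'xyz')
+>>> 'abcabc'.translate(table)
+'xyzxyz'
+\end{verbatim}
+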
For Unicode objects, the \method{translate()} method does not
accept the optional \var{deletechars} argument. Instead, it
returns a copy of the \var{s} where all characters have been mapped
@@ -1398,21 +1401,22 @@ arbitrary objects):
{(1)}
\lineiii{\var{a}.clear()}{remove all items from \code{a}}{}
\lineiii{\var{a}.copy()}{a (shallow) copy of \code{a}}{}
- \lineiii{\var{a}.has_key(\var{k})}
+ \lineiii{\var{k} in \var{a}}
{\code{True} if \var{a} has a key \var{k}, else \code{False}}
- {}
- \lineiii{\var{k} \code{in} \var{a}}
- {Equivalent to \var{a}.has_key(\var{k})}
{(2)}
\lineiii{\var{k} not in \var{a}}
- {Equivalent to \code{not} \var{a}.has_key(\var{k})}
+ {Equivalent to \code{not} \var{k} in \var{a}}
{(2)}
+ \lineiii{\var{a}.has_key(\var{k})}
+ {Equivalent to \var{k} \code{in} \var{a}, use that form in new code}
+ {}
\lineiii{\var{a}.items()}
{a copy of \var{a}'s list of (\var{key}, \var{value}) pairs}
{(3)}
\lineiii{\var{a}.keys()}{a copy of \var{a}'s list of keys}{(3)}
\lineiii{\var{a}.update(\optional{\var{b}})}
- {updates (and overwrites) key/value pairs from \var{b}}
+ {updates \var{a} with key/value pairs from \var{b}, overwriting
+ existing keys, returns \code{None}}
{(9)}
\lineiii{\var{a}.fromkeys(\var{seq}\optional{, \var{value}})}
{Creates a new dictionary with keys from \var{seq} and values set to \var{value}}
@@ -1670,6 +1674,7 @@ flush the read-ahead buffer.
behavior.
Note that not all file objects are seekable.
+  \versionchanged[Passing float values as offset has been deprecated]{2.6}
\end{methoddesc}
\begin{methoddesc}[file]{tell}{}
diff --git a/Doc/lib/libstring.tex b/Doc/lib/libstring.tex
index 1828b2e..bc1649f 100644
--- a/Doc/lib/libstring.tex
+++ b/Doc/lib/libstring.tex
@@ -220,7 +220,7 @@ objects. They are not available as string methods.
\begin{funcdesc}{maketrans}{from, to}
Return a translation table suitable for passing to
- \function{translate()} or \function{regex.compile()}, that will map
+ \function{translate()}, that will map
each character in \var{from} into the character at the same position
in \var{to}; \var{from} and \var{to} must have the same length.
diff --git a/Doc/lib/libsubprocess.tex b/Doc/lib/libsubprocess.tex
index 03072f7..f639710 100644
--- a/Doc/lib/libsubprocess.tex
+++ b/Doc/lib/libsubprocess.tex
@@ -12,9 +12,6 @@ connect to their input/output/error pipes, and obtain their return
codes. This module intends to replace several other, older modules
and functions, such as:
-% XXX Should add pointers to this module to at least the popen2
-% and commands sections.
-
\begin{verbatim}
os.system
os.spawn*
diff --git a/Doc/lib/libtempfile.tex b/Doc/lib/libtempfile.tex
index 9da8663..9b4d848 100644
--- a/Doc/lib/libtempfile.tex
+++ b/Doc/lib/libtempfile.tex
@@ -86,7 +86,12 @@ If \var{prefix} is specified, the file name will begin with that
prefix; otherwise, a default prefix is used.
If \var{dir} is specified, the file will be created in that directory;
-otherwise, a default directory is used.
+otherwise, a default directory is used. The default directory is chosen
+from a platform-dependent list, but the user of the application can control
+the directory location by setting the \envvar{TMPDIR}, \envvar{TEMP} or \envvar{TMP}
+environment variables. There is thus no guarantee that the generated
+filename will have any nice properties, such as not requiring quoting when
+passed to external commands via \code{os.popen()}.
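For illustration, a minimal sketch using \function{mkstemp()} (the
generated file name itself is random, so only the directory is checked
here):

\begin{verbatim}
>>> import tempfile, os
>>> fd, path = tempfile.mkstemp(prefix='spam_', suffix='.txt')
>>> path.startswith(tempfile.gettempdir())   # honors TMPDIR/TEMP/TMP
True
>>> os.close(fd)
>>> os.remove(path)
\end{verbatim}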
If \var{text} is specified, it indicates whether to open the file in
binary mode (the default) or text mode. On some platforms, this makes
diff --git a/Doc/lib/libunittest.tex b/Doc/lib/libunittest.tex
index f40493d..350abae 100644
--- a/Doc/lib/libunittest.tex
+++ b/Doc/lib/libunittest.tex
@@ -212,8 +212,8 @@ import unittest
class DefaultWidgetSizeTestCase(unittest.TestCase):
def runTest(self):
- widget = Widget("The widget")
- self.failUnless(widget.size() == (50,50), 'incorrect default size')
+ widget = Widget('The widget')
+ self.assertEqual(widget.size(), (50, 50), 'incorrect default size')
\end{verbatim}
Note that in order to test something, we use one of the
@@ -247,7 +247,7 @@ import unittest
class SimpleWidgetTestCase(unittest.TestCase):
def setUp(self):
- self.widget = Widget("The widget")
+ self.widget = Widget('The widget')
class DefaultWidgetSizeTestCase(SimpleWidgetTestCase):
def runTest(self):
@@ -273,7 +273,7 @@ import unittest
class SimpleWidgetTestCase(unittest.TestCase):
def setUp(self):
- self.widget = Widget("The widget")
+ self.widget = Widget('The widget')
def tearDown(self):
self.widget.dispose()
@@ -298,7 +298,7 @@ import unittest
class WidgetTestCase(unittest.TestCase):
def setUp(self):
- self.widget = Widget("The widget")
+ self.widget = Widget('The widget')
def tearDown(self):
self.widget.dispose()
@@ -322,8 +322,8 @@ instance we must specify the test method it is to run. We do this by
passing the method name in the constructor:
\begin{verbatim}
-defaultSizeTestCase = WidgetTestCase("testDefaultSize")
-resizeTestCase = WidgetTestCase("testResize")
+defaultSizeTestCase = WidgetTestCase('testDefaultSize')
+resizeTestCase = WidgetTestCase('testResize')
\end{verbatim}
Test case instances are grouped together according to the features
@@ -333,8 +333,8 @@ class:
\begin{verbatim}
widgetTestSuite = unittest.TestSuite()
-widgetTestSuite.addTest(WidgetTestCase("testDefaultSize"))
-widgetTestSuite.addTest(WidgetTestCase("testResize"))
+widgetTestSuite.addTest(WidgetTestCase('testDefaultSize'))
+widgetTestSuite.addTest(WidgetTestCase('testResize'))
\end{verbatim}
For the ease of running tests, as we will see later, it is a good
@@ -344,8 +344,8 @@ pre-built test suite:
\begin{verbatim}
def suite():
suite = unittest.TestSuite()
- suite.addTest(WidgetTestCase("testDefaultSize"))
- suite.addTest(WidgetTestCase("testResize"))
+ suite.addTest(WidgetTestCase('testDefaultSize'))
+ suite.addTest(WidgetTestCase('testResize'))
return suite
\end{verbatim}
@@ -353,7 +353,7 @@ or even:
\begin{verbatim}
def suite():
- tests = ["testDefaultSize", "testResize"]
+ tests = ['testDefaultSize', 'testResize']
return unittest.TestSuite(map(WidgetTestCase, tests))
\end{verbatim}
@@ -462,7 +462,7 @@ easier.}
\subsection{Classes and functions
\label{unittest-contents}}
-\begin{classdesc}{TestCase}{}
+\begin{classdesc}{TestCase}{\optional{methodName}}
Instances of the \class{TestCase} class represent the smallest
testable units in the \module{unittest} universe. This class is
intended to be used as a base class, with specific tests being
@@ -470,6 +470,23 @@ easier.}
interface needed by the test runner to allow it to drive the
test, and methods that the test code can use to check for and
report various kinds of failure.
+
+ Each instance of \class{TestCase} will run a single test method:
+ the method named \var{methodName}. If you remember, we had an
+ earlier example that went something like this:
+
+ \begin{verbatim}
+ def suite():
+ suite = unittest.TestSuite()
+ suite.addTest(WidgetTestCase('testDefaultSize'))
+ suite.addTest(WidgetTestCase('testResize'))
+ return suite
+ \end{verbatim}
+
+ Here, we create two instances of \class{WidgetTestCase}, each of
+ which runs a single test.
+
+ \var{methodName} defaults to \code{'runTest'}.
\end{classdesc}
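Continuing that example, a minimal sketch of actually running such a
suite (\class{WidgetTestCase} and \function{suite()} are the
hypothetical names from the snippets above):

\begin{verbatim}
if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
\end{verbatim}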
\begin{classdesc}{FunctionTestCase}{testFunc\optional{,
@@ -502,6 +519,11 @@ easier.}
subclass.
\end{classdesc}
+\begin{classdesc}{TestResult}{}
+ This class is used to compile information about which tests have succeeded
+ and which have failed.
+\end{classdesc}
+
\begin{datadesc}{defaultTestLoader}
Instance of the \class{TestLoader} class intended to be shared. If no
customization of the \class{TestLoader} is needed, this instance can
@@ -574,8 +596,9 @@ Methods in the first group (running the test) are:
\begin{methoddesc}[TestCase]{run}{\optional{result}}
Run the test, collecting the result into the test result object
passed as \var{result}. If \var{result} is omitted or \constant{None},
- a temporary result object is created and used, but is not made
- available to the caller.
+ a temporary result object is created (by calling the
+ \method{defaultTestResult()} method) and used; this result object is not
+ returned to \method{run()}'s caller.
The same effect may be had by simply calling the \class{TestCase}
instance.
@@ -684,8 +707,13 @@ information on the test:
\end{methoddesc}
\begin{methoddesc}[TestCase]{defaultTestResult}{}
- Return the default type of test result object to be used to run this
- test.
+ Return an instance of the test result class that should be used
+ for this test case class (if no other result instance is provided
+ to the \method{run()} method).
+
+ For \class{TestCase} instances, this will always be an instance of
+ \class{TestResult}; subclasses of \class{TestCase} should
+ override this as necessary.
\end{methoddesc}
\begin{methoddesc}[TestCase]{id}{}
@@ -761,26 +789,20 @@ access to the \class{TestResult} object generated by running a set of
tests for reporting purposes; a \class{TestResult} instance is
returned by the \method{TestRunner.run()} method for this purpose.
-Each instance holds the total number of tests run, and collections of
-failures and errors that occurred among those test runs. The
-collections contain tuples of \code{(\var{testcase},
-\var{traceback})}, where \var{traceback} is a string containing a
-formatted version of the traceback for the exception.
-
\class{TestResult} instances have the following attributes that will
be of interest when inspecting the results of running a set of tests:
\begin{memberdesc}[TestResult]{errors}
A list containing 2-tuples of \class{TestCase} instances and
- formatted tracebacks. Each tuple represents a test which raised an
- unexpected exception.
+ strings holding formatted tracebacks. Each tuple represents a test which
+ raised an unexpected exception.
\versionchanged[Contains formatted tracebacks instead of
\function{sys.exc_info()} results]{2.2}
\end{memberdesc}
\begin{memberdesc}[TestResult]{failures}
- A list containing 2-tuples of \class{TestCase} instances and
- formatted tracebacks. Each tuple represents a test where a failure
+ A list containing 2-tuples of \class{TestCase} instances and strings
+ holding formatted tracebacks. Each tuple represents a test where a failure
was explicitly signalled using the \method{TestCase.fail*()} or
\method{TestCase.assert*()} methods.
\versionchanged[Contains formatted tracebacks instead of
@@ -817,17 +839,25 @@ reporting while tests are being run.
\begin{methoddesc}[TestResult]{startTest}{test}
Called when the test case \var{test} is about to be run.
+
+ The default implementation simply increments the instance's
+ \code{testsRun} counter.
\end{methoddesc}
\begin{methoddesc}[TestResult]{stopTest}{test}
- Called when the test case \var{test} has been executed, regardless
+ Called after the test case \var{test} has been executed, regardless
of the outcome.
+
+ The default implementation does nothing.
\end{methoddesc}
\begin{methoddesc}[TestResult]{addError}{test, err}
Called when the test case \var{test} raises an unexpected exception.
\var{err} is a tuple of the form returned by \function{sys.exc_info()}:
\code{(\var{type}, \var{value}, \var{traceback})}.
+
+ The default implementation appends \code{(\var{test}, \var{err})} to
+ the instance's \code{errors} attribute.
\end{methoddesc}
\begin{methoddesc}[TestResult]{addFailure}{test, err}
@@ -835,10 +865,15 @@ reporting while tests are being run.
\var{err} is a tuple of the form returned by
\function{sys.exc_info()}: \code{(\var{type}, \var{value},
\var{traceback})}.
+
+ The default implementation appends \code{(\var{test}, \var{err})} to
+ the instance's \code{failures} attribute.
\end{methoddesc}
\begin{methoddesc}[TestResult]{addSuccess}{test}
Called when the test case \var{test} succeeds.
+
+ The default implementation does nothing.
\end{methoddesc}
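To illustrate the default implementations described above, a minimal
sketch of a \class{TestResult} subclass that also reports progress as
it runs (the class name is purely illustrative):

\begin{verbatim}
import unittest

class VerboseResult(unittest.TestResult):
    def startTest(self, test):
        unittest.TestResult.startTest(self, test)      # bumps testsRun
        print 'running', test

    def addError(self, test, err):
        unittest.TestResult.addError(self, test, err)  # records (test, err)
        print 'ERROR in', test

    def addSuccess(self, test):
        print 'ok', test
\end{verbatim}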
@@ -878,9 +913,12 @@ configurable properties.
Return a suite of all test cases given a string specifier.
The specifier \var{name} is a ``dotted name'' that may resolve
- either to a module, a test case class, a \class{TestSuite} instance,
- a test method within a test case class, or a callable object which
- returns a \class{TestCase} or \class{TestSuite} instance.
+ either to a module, a test case class, a test method within a test
+ case class, a \class{TestSuite} instance, or a callable object which
+ returns a \class{TestCase} or \class{TestSuite} instance. These checks
+ are applied in the order listed here; that is, a method on a possible
+ test case class will be picked up as ``a test method within a test
+ case class'', rather than ``a callable object''.
For example, if you have a module \module{SampleTests} containing a
\class{TestCase}-derived class \class{SampleTestCase} with three test
@@ -905,7 +943,7 @@ configurable properties.
\begin{methoddesc}[TestLoader]{getTestCaseNames}{testCaseClass}
Return a sorted sequence of method names found within
- \var{testCaseClass}.
+ \var{testCaseClass}; this should be a subclass of \class{TestCase}.
\end{methoddesc}
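A short sketch, reusing the hypothetical \module{SampleTests} module
mentioned above (the method name \code{test_one} is assumed):

\begin{verbatim}
import unittest
import SampleTests

loader = unittest.TestLoader()

# Every test method of SampleTestCase, collected into one suite:
suite = loader.loadTestsFromName('SampleTests.SampleTestCase')

# A single test method:
single = loader.loadTestsFromName('SampleTests.SampleTestCase.test_one')

print loader.getTestCaseNames(SampleTests.SampleTestCase)
\end{verbatim}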
diff --git a/Doc/lib/liburlparse.tex b/Doc/lib/liburlparse.tex
index f18efe9..8603605 100644
--- a/Doc/lib/liburlparse.tex
+++ b/Doc/lib/liburlparse.tex
@@ -142,7 +142,7 @@ a ? with an empty query; the RFC states that these are equivalent).
\begin{funcdesc}{urljoin}{base, url\optional{, allow_fragments}}
Construct a full (``absolute'') URL by combining a ``base URL''
-(\var{base}) with a ``relative URL'' (\var{url}). Informally, this
+(\var{base}) with another URL (\var{url}). Informally, this
uses components of the base URL, in particular the addressing scheme,
the network location and (part of) the path, to provide missing
components in the relative URL. For example:
@@ -155,6 +155,20 @@ components in the relative URL. For example:
The \var{allow_fragments} argument has the same meaning and default as
for \function{urlparse()}.
+
+\note{If \var{url} is an absolute URL (that is, starting with \code{//}
+ or \code{scheme://}), the \var{url}'s host name and/or scheme
+ will be present in the result. For example:}
+
+\begin{verbatim}
+>>> urljoin('http://www.cwi.nl/%7Eguido/Python.html',
+... '//www.python.org/%7Eguido')
+'http://www.python.org/%7Eguido'
+\end{verbatim}
+
+If you do not want that behavior, preprocess
+the \var{url} with \function{urlsplit()} and \function{urlunsplit()},
+removing possible \emph{scheme} and \emph{netloc} parts.
\end{funcdesc}
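A sketch of that preprocessing step, stripping any scheme and netloc
before joining:

\begin{verbatim}
>>> from urlparse import urljoin, urlsplit, urlunsplit
>>> url = '//www.python.org/%7Eguido'
>>> scheme, netloc, path, query, fragment = urlsplit(url)
>>> relative = urlunsplit(('', '', path, query, fragment))
>>> urljoin('http://www.cwi.nl/%7Eguido/Python.html', relative)
'http://www.cwi.nl/%7Eguido'
\end{verbatim}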
\begin{funcdesc}{urldefrag}{url}
diff --git a/Doc/lib/libuuid.tex b/Doc/lib/libuuid.tex
index a9d5295..5aa9d8c 100644
--- a/Doc/lib/libuuid.tex
+++ b/Doc/lib/libuuid.tex
@@ -18,20 +18,11 @@ may compromise privacy since it creates a UUID containing the computer's
network address. \function{uuid4()} creates a random UUID.
\begin{classdesc}{UUID}{\optional{hex\optional{, bytes\optional{,
-fields\optional{, int\optional{, version}}}}}}
-
-%Instances of the UUID class represent UUIDs as specified in RFC 4122.
-%UUID objects are immutable, hashable, and usable as dictionary keys.
-%Converting a UUID to a string with str() yields something in the form
-%'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
-%four possible forms: a similar string of hexadecimal digits, or a
-%string of 16 raw bytes as an argument named 'bytes', or a tuple of
-%six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
-%48-bit values respectively) as an argument named 'fields', or a single
-%128-bit integer as an argument named 'int'.
+bytes_le\optional{, fields\optional{, int\optional{, version}}}}}}}
Create a UUID from either a string of 32 hexadecimal digits,
-a string of 16 bytes as the \var{bytes} argument, a tuple of six
+a string of 16 bytes as the \var{bytes} argument, a string of 16 bytes
+in little-endian order as the \var{bytes_le} argument, a tuple of six
integers (32-bit \var{time_low}, 16-bit \var{time_mid},
16-bit \var{time_hi_version},
8-bit \var{clock_seq_hi_variant}, 8-bit \var{clock_seq_low}, 48-bit \var{node})
@@ -45,22 +36,31 @@ UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
+UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
+ '\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
\end{verbatim}
-Exactly one of \var{hex}, \var{bytes}, \var{fields}, or \var{int} must
+Exactly one of \var{hex}, \var{bytes}, \var{bytes_le}, \var{fields},
+or \var{int} must
be given. The \var{version} argument is optional; if given, the
resulting UUID will have its variant and version number set according to
RFC 4122, overriding bits in the given \var{hex}, \var{bytes},
-\var{fields}, or \var{int}.
+\var{bytes_le}, \var{fields}, or \var{int}.
\end{classdesc}
\class{UUID} instances have these read-only attributes:
\begin{memberdesc}{bytes}
-The UUID as a 16-byte string.
+The UUID as a 16-byte string (containing the six
+integer fields in big-endian byte order).
+\end{memberdesc}
+
+\begin{memberdesc}{bytes_le}
+The UUID as a 16-byte string (with \var{time_low}, \var{time_mid},
+and \var{time_hi_version} in little-endian byte order).
\end{memberdesc}
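A short interactive sketch of how the two byte orderings relate:

\begin{verbatim}
>>> from uuid import UUID
>>> u = UUID('12345678-1234-5678-1234-567812345678')
>>> u.bytes == '\x12\x34\x56\x78' * 4
True
>>> UUID(bytes_le=u.bytes_le) == u
True
\end{verbatim}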
\begin{memberdesc}{fields}
@@ -95,10 +95,10 @@ The UUID as a URN as specified in RFC 4122.
\begin{memberdesc}{variant}
The UUID variant, which determines the internal layout of the UUID.
-This will be an integer equal to one of the constants
+This will be one of the integer constants
\constant{RESERVED_NCS},
\constant{RFC_4122}, \constant{RESERVED_MICROSOFT}, or
-\constant{RESERVED_FUTURE}).
+\constant{RESERVED_FUTURE}.
\end{memberdesc}
\begin{memberdesc}{version}
@@ -106,7 +106,7 @@ The UUID version number (1 through 5, meaningful only
when the variant is \constant{RFC_4122}).
\end{memberdesc}
-The \module{uuid} module defines the following functions
+The \module{uuid} module defines the following functions:
\begin{funcdesc}{getnode}{}
Get the hardware address as a 48-bit positive integer. The first time this
@@ -129,11 +129,8 @@ otherwise a random 14-bit sequence number is chosen.
\index{uuid1}
\begin{funcdesc}{uuid3}{namespace, name}
-Generate a UUID based upon a MD5 hash of the \var{name} string value
-drawn from a specified namespace. \var{namespace}
-must be one of \constant{NAMESPACE_DNS},
-\constant{NAMESPACE_URL}, \constant{NAMESPACE_OID},
-or \constant{NAMESPACE_X500}.
+Generate a UUID based on the MD5 hash
+of a namespace identifier (which is a UUID) and a name (which is a string).
\end{funcdesc}
\index{uuid3}
@@ -143,31 +140,32 @@ Generate a random UUID.
\index{uuid4}
\begin{funcdesc}{uuid5}{namespace, name}
-Generate a UUID based upon a SHA-1 hash of the \var{name} string value
-drawn from a specified namespace. \var{namespace}
-must be one of \constant{NAMESPACE_DNS},
-\constant{NAMESPACE_URL}, \constant{NAMESPACE_OID},
-or \constant{NAMESPACE_X500}.
+Generate a UUID based on the SHA-1 hash
+of a namespace identifier (which is a UUID) and a name (which is a string).
\end{funcdesc}
\index{uuid5}
-The \module{uuid} module defines the following namespace constants
+The \module{uuid} module defines the following namespace identifiers
for use with \function{uuid3()} or \function{uuid5()}.
\begin{datadesc}{NAMESPACE_DNS}
-Fully-qualified domain name namespace UUID.
+When this namespace is specified,
+the \var{name} string is a fully-qualified domain name.
\end{datadesc}
\begin{datadesc}{NAMESPACE_URL}
-URL namespace UUID.
+When this namespace is specified,
+the \var{name} string is a URL.
\end{datadesc}
\begin{datadesc}{NAMESPACE_OID}
-ISO OID namespace UUID.
+When this namespace is specified,
+the \var{name} string is an ISO OID.
\end{datadesc}
\begin{datadesc}{NAMESPACE_X500}
-X.500 DN namespace UUID.
+When this namespace is specified,
+the \var{name} string is an X.500 DN (in DER or a text output format).
\end{datadesc}
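For example, name-based UUIDs generated with the DNS namespace
(\function{uuid3()} and \function{uuid5()} are deterministic, so these
values should be reproducible):

\begin{verbatim}
>>> import uuid
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
\end{verbatim}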
The \module{uuid} module defines the following constants
@@ -178,11 +176,11 @@ Reserved for NCS compatibility.
\end{datadesc}
\begin{datadesc}{RFC_4122}
-Uses UUID layout specified in \rfc{4122}.
+Specifies the UUID layout given in \rfc{4122}.
\end{datadesc}
\begin{datadesc}{RESERVED_MICROSOFT}
-Reserved for Microsoft backward compatibility.
+Reserved for Microsoft compatibility.
\end{datadesc}
\begin{datadesc}{RESERVED_FUTURE}
@@ -192,12 +190,13 @@ Reserved for future definition.
\begin{seealso}
\seerfc{4122}{A Universally Unique IDentifier (UUID) URN Namespace}{
- This specifies a Uniform Resource Name namespace for UUIDs.}
+This specification defines a Uniform Resource Name namespace for UUIDs,
+the internal format of UUIDs, and methods of generating UUIDs.}
\end{seealso}
\subsection{Example \label{uuid-example}}
-Here is a typical usage:
+Here are some examples of typical usage of the \module{uuid} module:
\begin{verbatim}
>>> import uuid
diff --git a/Doc/lib/libwsgiref.tex b/Doc/lib/libwsgiref.tex
index 4b12e9d..37ded9f 100755
--- a/Doc/lib/libwsgiref.tex
+++ b/Doc/lib/libwsgiref.tex
@@ -26,8 +26,9 @@ checks WSGI servers and applications for conformance to the
WSGI specification (\pep{333}).
% XXX If you're just trying to write a web application...
-% XXX should create a URL on python.org to point people to.
+See \url{http://www.wsgi.org} for more information about WSGI,
+and links to tutorials and other resources.
diff --git a/Doc/lib/libxmlrpclib.tex b/Doc/lib/libxmlrpclib.tex
index b08920c..c870d26 100644
--- a/Doc/lib/libxmlrpclib.tex
+++ b/Doc/lib/libxmlrpclib.tex
@@ -68,7 +68,10 @@ Python type):
\lineii{arrays}{Any Python sequence type containing conformable
elements. Arrays are returned as lists}
\lineii{structures}{A Python dictionary. Keys must be strings,
- values may be any conformable type.}
+ values may be any conformable type. Objects
+ of user-defined classes can be passed in;
+ only their \var{__dict__} attribute is
+ transmitted.}
\lineii{dates}{in seconds since the epoch (pass in an instance of the
\class{DateTime} class) or a
\class{\refmodule{datetime}.datetime},
@@ -100,6 +103,10 @@ described below.
compatibility. New code should use \class{ServerProxy}.
\versionchanged[The \var{use_datetime} flag was added]{2.5}
+
+\versionchanged[Instances of new-style classes can be passed in
+if they have an \var{__dict__} attribute and don't have a base class
+that is marshalled in a special way]{2.6}
\end{classdesc}
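A minimal sketch of what passing in an instance means in practice
(the method name \code{plot_point} is hypothetical):

\begin{verbatim}
import xmlrpclib

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

# The instance is marshalled as a struct built from its __dict__;
# the receiving end sees {'x': 1, 'y': 2}, not a Point object.
payload = xmlrpclib.dumps((Point(1, 2),), 'plot_point')
\end{verbatim}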
diff --git a/Doc/lib/sqlite3/executescript.py b/Doc/lib/sqlite3/executescript.py
index 0795b47..7e53581 100644
--- a/Doc/lib/sqlite3/executescript.py
+++ b/Doc/lib/sqlite3/executescript.py
@@ -17,7 +17,7 @@ cur.executescript("""
insert into book(title, author, published)
values (
- 'Dirk Gently''s Holistic Detective Agency
+ 'Dirk Gently''s Holistic Detective Agency',
'Douglas Adams',
1987
);
diff --git a/Doc/lib/tkinter.tex b/Doc/lib/tkinter.tex
index db52cbd..20b2373 100644
--- a/Doc/lib/tkinter.tex
+++ b/Doc/lib/tkinter.tex
@@ -318,7 +318,7 @@ the name of a widget.
\item[\var{options}]
configure the widget's appearance and in some cases, its
behavior. The options come in the form of a list of flags and values.
-Flags are proceeded by a `-', like \UNIX{} shell command flags, and
+Flags are preceded by a `-', like \UNIX{} shell command flags, and
values are put in quotes if they are more than one word.
\end{description}
diff --git a/Doc/perl/python.perl b/Doc/perl/python.perl
index ab93c7c..cf0301e 100644
--- a/Doc/perl/python.perl
+++ b/Doc/perl/python.perl
@@ -883,6 +883,12 @@ sub process_grammar_files(){
$filename = 'grammar.txt';
}
open(GRAMMAR, ">$filename") || die "\n$!\n";
+ print GRAMMAR "##################################################\n";
+ print GRAMMAR "# This file is only meant to be a guide, #\n";
+ print GRAMMAR "# and differs in small ways from the real #\n";
+ print GRAMMAR "# grammar. The exact reference is the file #\n";
+ print GRAMMAR "# Grammar/Grammar distributed with the source. #\n";
+ print GRAMMAR "##################################################\n";
print GRAMMAR strip_grammar_markup($DefinedGrammars{$lang});
close(GRAMMAR);
print "Wrote grammar file $filename\n";
diff --git a/Doc/ref/ref3.tex b/Doc/ref/ref3.tex
index 7eddfcd..fb57eb0 100644
--- a/Doc/ref/ref3.tex
+++ b/Doc/ref/ref3.tex
@@ -378,6 +378,41 @@ additional example of a mutable sequence type.
\end{description} % Sequences
+
+\item[Set types]
+These represent unordered, finite sets of unique, immutable objects.
+As such, they cannot be indexed by any subscript. However, they can be
+iterated over, and the built-in function \function{len()} returns the
+number of items in a set. Common uses for sets are
+fast membership testing, removing duplicates from a sequence, and
+computing mathematical operations such as intersection, union, difference,
+and symmetric difference.
+\bifuncindex{len}
+\obindex{set type}
+
+For set elements, the same immutability rules apply as for dictionary
+keys. Note that numeric types obey the normal rules for numeric
+comparison: if two numbers compare equal (e.g., \code{1} and
+\code{1.0}), only one of them can be contained in a set.
+
+There are currently two intrinsic set types:
+
+\begin{description}
+
+\item[Sets]
+These\obindex{set} represent a mutable set. They are created by the
+built-in \function{set()} constructor and can be modified afterwards
+by several methods, such as \method{add()}.
+
+\item[Frozen sets]
+These\obindex{frozenset} represent an immutable set. They are created by
+the built-in \function{frozenset()} constructor. As a frozenset is
+immutable and hashable, it can be used again as an element of another set,
+or as a dictionary key.
+
+\end{description} % Set types
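+
+A brief interactive illustration of the two set types:
+
+\begin{verbatim}
+>>> a = set('abracadabra')          # duplicates are removed
+>>> 'c' in a                        # fast membership testing
+True
+>>> a.add('z')                      # sets are mutable
+>>> f = frozenset(a)
+>>> f.add('q')                      # frozensets are not
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+AttributeError: 'frozenset' object has no attribute 'add'
+\end{verbatim}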
+
+
\item[Mappings]
These represent finite sets of objects indexed by arbitrary index sets.
The subscript notation \code{a[k]} selects the item indexed
@@ -761,7 +796,7 @@ user-defined method object whose associated class is the class
(call it~\class{C}) of the instance for which the attribute reference
was initiated or one of its bases,
it is transformed into a bound user-defined method object whose
-\member{im_class} attribute is~\class{C} whose \member{im_self} attribute
+\member{im_class} attribute is~\class{C} and whose \member{im_self} attribute
is the instance. Static method and class method objects are also
transformed, as if they had been retrieved from class~\class{C};
see above under ``Classes''. See section~\ref{descriptors} for
diff --git a/Doc/tools/py2texi.el b/Doc/tools/py2texi.el
index c3d8df0..404234f 100644
--- a/Doc/tools/py2texi.el
+++ b/Doc/tools/py2texi.el
@@ -1,5 +1,6 @@
;;; py2texi.el -- Conversion of Python LaTeX documentation to Texinfo
+;; Copyright (C) 2006 Jeroen Dekkers <jeroen@dekkers.cx>
;; Copyright (C) 1998, 1999, 2001, 2002 Milan Zamazal
;; Author: Milan Zamazal <pdm@zamazal.org>
@@ -168,6 +169,7 @@ Otherwise a generated Info file name is used.")
"@end table\n")
("productionlist" 0 "\n@table @code\n" "@end table\n")
("quotation" 0 "@quotation" "@end quotation")
+ ("quote" 0 "@quotation" "@end quotation")
("seealso" 0 "See also:\n@table @emph\n" "@end table\n")
("seealso*" 0 "@table @emph\n" "@end table\n")
("sloppypar" 0 "" "")
@@ -246,11 +248,12 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("env" 1 "@code{\\1}")
("EOF" 0 "@code{EOF}")
("email" 1 "@email{\\1}")
+ ("em" 1 "@emph{\\1}")
("emph" 1 "@emph{\\1}")
("envvar" 1 "@env{\\1}")
("exception" 1 "@code{\\1}")
("exindex" 1 (progn (setq obindex t) "@obindex{\\1}"))
- ("fi" 0 (concat "@end " last-if))
+ ("fi" 0 (if (equal last-if "ifx") "" (concat "@end " last-if)))
("file" 1 "@file{\\1}")
("filenq" 1 "@file{\\1}")
("filevar" 1 "@file{@var{\\1}}")
@@ -262,6 +265,7 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("grammartoken" 1 "@code{\\1}")
("guilabel" 1 "@strong{\\1}")
("hline" 0 "")
+ ("ifx" 0 (progn (setq last-if "ifx") ""))
("ifhtml" 0 (concat "@" (setq last-if "ifinfo")))
("iftexi" 0 (concat "@" (setq last-if "ifinfo")))
("index" 1 (progn (setq cindex t) "@cindex{\\1}"))
@@ -284,6 +288,7 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("lineiii" 3 "@item \\1 @tab \\2 @tab \\3")
("lineiv" 4 "@item \\1 @tab \\2 @tab \\3 @tab \\4")
("linev" 5 "@item \\1 @tab \\2 @tab \\3 @tab \\4 @tab \\5")
+ ("locallinewidth" 0 "")
("localmoduletable" 0 "")
("longprogramopt" 1 "@option{--\\1}")
("macro" 1 "@code{@backslash{}\\1}")
@@ -307,6 +312,7 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("moreargs" 0 "@dots{}")
("n" 0 "@backslash{}n")
("newcommand" 2 "")
+ ("newlength" 1 "")
("newsgroup" 1 "@samp{\\1}")
("nodename" 1
(save-excursion
@@ -322,6 +328,7 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("opindex" 1 (progn (setq cindex t) "@cindex{\\1}"))
("option" 1 "@option{\\1}")
("optional" 1 "[\\1]")
+ ("paragraph" 1 "@subsubheading \\1")
("pep" 1 (progn (setq cindex t) "PEP@ \\1@cindex PEP \\1\n"))
("pi" 0 "pi")
("platform" 1 "")
@@ -363,6 +370,7 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("seetitle" 1 "@cite{\\1}")
("seeurl" 2 "\n@table @url\n@item \\1\n\\2\n@end table\n")
("setindexsubitem" 1 (progn (setq cindex t) "@cindex \\1"))
+ ("setlength" 2 "")
("setreleaseinfo" 1 (progn (setq py2texi-releaseinfo "")))
("setshortversion" 1
(progn (setq py2texi-python-short-version (match-string 1 string)) ""))
@@ -382,8 +390,8 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("textasciicircum" 0 "^")
("textbackslash" 0 "@backslash{}")
("textbar" 0 "|")
- ; Some common versions of Texinfo don't support @euro yet:
- ; ("texteuro" 0 "@euro{}")
+ ("textbf" 1 "@strong{\\1}")
+ ("texteuro" 0 "@euro{}")
; Unfortunately, this alternate spelling doesn't actually apply to
; the usage found in Python Tutorial, which actually requires a
; Euro symbol to make sense, so this is commented out as well.
@@ -394,6 +402,7 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("textrm" 1 "\\1")
("texttt" 1 "@code{\\1}")
("textunderscore" 0 "_")
+ ("tilde" 0 "~")
("title" 1 (progn (setq title (match-string 1 string)) "@settitle \\1"))
("today" 0 "@today{}")
("token" 1 "@code{\\1}")
@@ -402,6 +411,7 @@ Both BEGIN and END are evaled. Moreover, you can reference arguments through
("u" 0 "@backslash{}u")
("ulink" 2 "\\1")
("UNIX" 0 "UNIX")
+ ("undefined" 0 "")
("unspecified" 0 "@dots{}")
("url" 1 "@url{\\1}")
("usepackage" 1 "")
@@ -534,15 +544,20 @@ Each list item is of the form (COMMAND ARGNUM SUBSTITUTION) where:
beg
end)
(py2texi-search-safe "\\\\begin{\\(verbatim\\|displaymath\\)}"
- (replace-match "@example")
- (setq beg (copy-marker (point) nil))
- (re-search-forward "\\\\end{\\(verbatim\\|displaymath\\)}")
- (setq end (copy-marker (match-beginning 0) nil))
- (replace-match "@end example")
- (py2texi-texinfo-escape beg end)
- (put-text-property (- beg (length "@example"))
- (+ end (length "@end example"))
- 'py2texi-protected t))
+ (when (save-excursion
+ ; Make sure we aren't looking at a commented out version
+ ; of a verbatim environment
+ (beginning-of-line)
+ (not (looking-at "%")))
+ (replace-match "@example ")
+ (setq beg (copy-marker (point) nil))
+ (re-search-forward "\\\\end{\\(verbatim\\|displaymath\\)}")
+ (setq end (copy-marker (match-beginning 0) nil))
+ (replace-match "@end example")
+ (py2texi-texinfo-escape beg end)
+ (put-text-property (- beg (length "@example "))
+ (+ end (length "@end example"))
+ 'py2texi-protected t)))
(py2texi-search-safe "\\\\verb\\([^a-z]\\)"
(setq delimiter (match-string 1))
(replace-match "@code{")
@@ -883,6 +898,10 @@ Do not include .ind files."
(defun py2texi-fix-braces ()
"Escape braces for Texinfo."
+ (py2texi-search "{@{}"
+ (replace-match "@{"))
+ (py2texi-search "{@}}"
+ (replace-match "@}"))
(let (string)
(py2texi-search "{"
(unless (or (py2texi-protected)
diff --git a/Doc/tut/tut.tex b/Doc/tut/tut.tex
index 2daf812..c76c518 100644
--- a/Doc/tut/tut.tex
+++ b/Doc/tut/tut.tex
@@ -2831,7 +2831,7 @@ Now what happens when the user writes \code{from Sound.Effects import
*}? Ideally, one would hope that this somehow goes out to the
filesystem, finds which submodules are present in the package, and
imports them all. Unfortunately, this operation does not work very
-well on Mac and Windows platforms, where the filesystem does not
+well on Windows platforms, where the filesystem does not
always have accurate information about the case of a filename! On
these platforms, there is no guaranteed way to know whether a file
\file{ECHO.PY} should be imported as a module \module{echo},
@@ -3036,6 +3036,7 @@ Here are two ways to write a table of squares and cubes:
8 64 512
9 81 729
10 100 1000
+
>>> for x in range(1,11):
... print '%2d %3d %4d' % (x, x*x, x*x*x)
...
@@ -3051,8 +3052,9 @@ Here are two ways to write a table of squares and cubes:
10 100 1000
\end{verbatim}
-(Note that one space between each column was added by the way
-\keyword{print} works: it always adds spaces between its arguments.)
+(Note that in the first example, one space between each column was
+added by the way \keyword{print} works: it always adds spaces between
+its arguments.)
This example demonstrates the \method{rjust()} method of string objects,
which right-justifies a string in a field of a given width by padding
@@ -3521,7 +3523,7 @@ be accessed or printed directly without having to reference \code{.args}.
But use of \code{.args} is discouraged. Instead, the preferred use is to pass
a single argument to an exception (which can be a tuple if multiple arguments
-are needed) and have it bound to the \code{message} attribute. One my also
+are needed) and have it bound to the \code{message} attribute. One may also
instantiate an exception first before raising it and add any attributes to it
as desired.
diff --git a/Doc/whatsnew/whatsnew25.tex b/Doc/whatsnew/whatsnew25.tex
index bf939c0..fb68acc 100644
--- a/Doc/whatsnew/whatsnew25.tex
+++ b/Doc/whatsnew/whatsnew25.tex
@@ -409,7 +409,7 @@ is always executed, or one or more \keyword{except} blocks to catch
specific exceptions. You couldn't combine both \keyword{except} blocks and a
\keyword{finally} block, because generating the right bytecode for the
combined version was complicated and it wasn't clear what the
-semantics of the combined should be.
+semantics of the combined statement should be.
Guido van~Rossum spent some time working with Java, which does support the
equivalent of combining \keyword{except} blocks and a
@@ -540,10 +540,10 @@ Traceback (most recent call last):
StopIteration
\end{verbatim}
-Because \keyword{yield} will often be returning \constant{None}, you
+\keyword{yield} will usually return \constant{None}, so you
should always check for this case. Don't just use its value in
expressions unless you're sure that the \method{send()} method
-will be the only method used resume your generator function.
+will be the only method used to resume your generator function.
In addition to \method{send()}, there are two other new methods on
generators:
@@ -683,22 +683,22 @@ with lock:
The lock is acquired before the block is executed and always released once
the block is complete.
-The \module{decimal} module's contexts, which encapsulate the desired
-precision and rounding characteristics for computations, provide a
-\method{context_manager()} method for getting a context manager:
+The new \function{localcontext()} function in the \module{decimal} module
+makes it easy to save and restore the current decimal context, which
+encapsulates the desired precision and rounding characteristics for
+computations:
\begin{verbatim}
-import decimal
+from decimal import Decimal, Context, localcontext
# Displays with default precision of 28 digits
-v1 = decimal.Decimal('578')
-print v1.sqrt()
+v = Decimal('578')
+print v.sqrt()
-ctx = decimal.Context(prec=16)
-with ctx.context_manager():
+with localcontext(Context(prec=16)):
# All code in this block uses a precision of 16 digits.
# The original context is restored on exiting the block.
- print v1.sqrt()
+ print v.sqrt()
\end{verbatim}
\subsection{Writing Context Managers\label{context-managers}}
@@ -1115,12 +1115,14 @@ Some examples:
\begin{verbatim}
>>> ('http://www.python.org').partition('://')
('http', '://', 'www.python.org')
->>> (u'Subject: a quick question').partition(':')
-(u'Subject', u':', u' a quick question')
>>> ('file:/usr/share/doc/index.html').partition('://')
('file:/usr/share/doc/index.html', '', '')
+>>> (u'Subject: a quick question').partition(':')
+(u'Subject', u':', u' a quick question')
>>> 'www.python.org'.rpartition('.')
('www.python', '.', 'org')
+>>> 'www.python.org'.rpartition(':')
+('', '', 'www.python.org')
\end{verbatim}
(Implemented by Fredrik Lundh following a suggestion by Raymond Hettinger.)
@@ -2114,14 +2116,16 @@ The pysqlite module (\url{http://www.pysqlite.org}), a wrapper for the
SQLite embedded database, has been added to the standard library under
the package name \module{sqlite3}.
-SQLite is a C library that provides a SQL-language database that
-stores data in disk files without requiring a separate server process.
+SQLite is a C library that provides a lightweight disk-based database
+that doesn't require a separate server process and allows accessing
+the database using a nonstandard variant of the SQL query language.
+Some applications can use SQLite for internal data storage. It's also
+possible to prototype an application using SQLite and then port the
+code to a larger database such as PostgreSQL or Oracle.
+
pysqlite was written by Gerhard H\"aring and provides a SQL interface
compliant with the DB-API 2.0 specification described by
-\pep{249}. This means that it should be possible to write the first
-version of your applications using SQLite for data storage. If
-switching to a larger database such as PostgreSQL or Oracle is
-later necessary, the switch should be relatively easy.
+\pep{249}.
If you're compiling the Python source yourself, note that the source
tree doesn't include the SQLite code, only the wrapper module.
@@ -2148,8 +2152,8 @@ c = conn.cursor()
# Create table
c.execute('''create table stocks
-(date timestamp, trans varchar, symbol varchar,
- qty decimal, price decimal)''')
+(date text, trans text, symbol text,
+ qty real, price real)''')
# Insert a row of data
c.execute("""insert into stocks
diff --git a/Doc/whatsnew/whatsnew26.tex b/Doc/whatsnew/whatsnew26.tex
new file mode 100644
index 0000000..afe067e
--- /dev/null
+++ b/Doc/whatsnew/whatsnew26.tex
@@ -0,0 +1,137 @@
+\documentclass{howto}
+\usepackage{distutils}
+% $Id$
+
+
+\title{What's New in Python 2.6}
+\release{0.0}
+\author{A.M. Kuchling}
+\authoraddress{\email{amk@amk.ca}}
+
+\begin{document}
+\maketitle
+\tableofcontents
+
+This article explains the new features in Python 2.6. No release date
+for Python 2.6 has been set; it will probably be released in late 2007.
+
+% Compare with previous release in 2 - 3 sentences here.
+
+This article doesn't attempt to provide a complete specification of
+the new features, but instead provides a convenient overview. For
+full details, you should refer to the documentation for Python 2.6.
+% add hyperlink when the documentation becomes available online.
+If you want to understand the complete implementation and design
+rationale, refer to the PEP for a particular new feature.
+
+
+%======================================================================
+
+% Large, PEP-level features and changes should be described here.
+
+
+%======================================================================
+\section{Other Language Changes}
+
+Here are all of the changes that Python 2.6 makes to the core Python
+language.
+
+\begin{itemize}
+\item TBD
+
+\end{itemize}
+
+
+%======================================================================
+\subsection{Optimizations}
+
+\begin{itemize}
+
+\item Optimizations should be described here.
+
+\end{itemize}
+
+The net result of the 2.6 optimizations is that Python 2.6 runs the
+pystone benchmark around XX\% faster than Python 2.5.
+
+
+%======================================================================
+\section{New, Improved, and Deprecated Modules}
+
+As usual, Python's standard library received a number of enhancements and
+bug fixes. Here's a partial list of the most notable changes, sorted
+alphabetically by module name. Consult the
+\file{Misc/NEWS} file in the source tree for a more
+complete list of changes, or look through the CVS logs for all the
+details.
+
+\begin{itemize}
+
+\item The \module{smtplib} module now supports SMTP over
+SSL thanks to the addition of the \class{SMTP_SSL} class.
+This class supports an interface identical to the existing \class{SMTP}
+class. (Contributed by Monty Taylor.)
+
+\end{itemize}
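+
+As an illustration of the \class{SMTP_SSL} item above, a minimal and
+purely hypothetical sketch (host, port, addresses and credentials are
+placeholders; SMTP over SSL conventionally uses port 465):
+
+\begin{verbatim}
+import smtplib
+
+# SMTP_SSL negotiates SSL from the start of the connection,
+# unlike SMTP followed by starttls().
+server = smtplib.SMTP_SSL('mail.example.com', 465)
+server.login('user', 'secret')
+server.sendmail('from@example.com', ['to@example.com'],
+                'Subject: test\r\n\r\nHello over SSL.')
+server.quit()
+\end{verbatim}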
+
+
+%======================================================================
+% whole new modules get described in \subsections here
+
+
+% ======================================================================
+\section{Build and C API Changes}
+
+Changes to Python's build process and to the C API include:
+
+\begin{itemize}
+
+\item Detailed changes are listed here.
+
+\end{itemize}
+
+
+%======================================================================
+\subsection{Port-Specific Changes}
+
+Platform-specific changes go here.
+
+
+%======================================================================
+\section{Other Changes and Fixes \label{section-other}}
+
+As usual, there were a bunch of other improvements and bugfixes
+scattered throughout the source tree. A search through the change
+logs finds there were XXX patches applied and YYY bugs fixed between
+Python 2.5 and 2.6. Both figures are likely to be underestimates.
+
+Some of the more notable changes are:
+
+\begin{itemize}
+
+\item Details go here.
+
+\end{itemize}
+
+
+%======================================================================
+\section{Porting to Python 2.6}
+
+This section lists previously described changes that may require
+changes to your code:
+
+\begin{itemize}
+
+\item Everything is all in the details!
+
+\end{itemize}
+
+
+%======================================================================
+\section{Acknowledgements \label{acks}}
+
+The author would like to thank the following people for offering
+suggestions, corrections and assistance with various drafts of this
+article: .
+
+\end{document}