author | Martin v. Löwis <martin@v.loewis.de> | 2002-11-19 08:09:52 (GMT)
---|---|---
committer | Martin v. Löwis <martin@v.loewis.de> | 2002-11-19 08:09:52 (GMT)
commit | 6aa4a1f29ca575e25fc595857b2a5168a02c9780 (patch) |
tree | ee9cce4c56b3878e5d5b5178f05f6809a19bd9c4 /Lib/bsddb |
parent | 1d2674051b5d9ad2764bb1379b06cf61974c6fdb (diff) |
Import PyBSDDB 3.4.0. Rename historical wrapper to bsddb185.
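For callers that still need the BerkeleyDB 1.85-format wrapper, the renamed module can be imported explicitly. A minimal sketch (the try/except fallback pattern is illustrative, not part of this diff):

```python
# Hypothetical fallback: prefer the new PyBSDDB-based package, and fall
# back to the renamed 1.85-era wrapper where the new one is unavailable.
try:
    import bsddb              # new package imported by this commit
except ImportError:
    import bsddb185 as bsddb  # historical wrapper, renamed by this commit
```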
Diffstat (limited to 'Lib/bsddb')
-rw-r--r-- | Lib/bsddb/__init__.py | 237
-rw-r--r-- | Lib/bsddb/db.py | 44
-rw-r--r-- | Lib/bsddb/dbobj.py | 178
-rw-r--r-- | Lib/bsddb/dbrecio.py | 190
-rw-r--r-- | Lib/bsddb/dbshelve.py | 291
-rw-r--r-- | Lib/bsddb/dbtables.py | 629
-rw-r--r-- | Lib/bsddb/dbutils.py | 69
7 files changed, 1638 insertions, 0 deletions
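The full diff follows. The package's `__init__.py` keeps the old bsddb module's calling conventions, so existing callers keep working; a minimal usage sketch, in the Python 2-era syntax of the imported code (the filename here is hypothetical):

```python
import bsddb

# hashopen() wraps a DB_HASH database in _DBWithCursor, which provides
# the old module's dict-style access plus cursor-based traversal.
db = bsddb.hashopen('/tmp/spam.db', 'c')  # 'c' => DB_CREATE if missing
db['key'] = 'value'                       # dict-style store
print db.first()                          # first (key, value) pair via an implicit cursor
db.close()
```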
diff --git a/Lib/bsddb/__init__.py b/Lib/bsddb/__init__.py new file mode 100644 index 0000000..a2d820c --- /dev/null +++ b/Lib/bsddb/__init__.py @@ -0,0 +1,237 @@ +#---------------------------------------------------------------------- +# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA +# and Andrew Kuchling. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# o Redistributions of source code must retain the above copyright +# notice, this list of conditions, and the disclaimer that follows. +# +# o Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions, and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# +# o Neither the name of Digital Creations nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS +# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL +# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. +#---------------------------------------------------------------------- + + +""" +This package initialization module provides a compatibility interface +that should enable bsddb3 to be a near drop-in replacement for the original +old bsddb module. The functions and classes provided here are all +wrappers around the new functionality provided in the bsddb3.db module. + +People interested in the more advanced capabilites of Berkeley DB 3.x +should use the bsddb3.db module directly. +""" + +import _bsddb +# bsddb3 calls it _db +_db = _bsddb +__version__ = _db.__version__ + +error = _db.DBError # So bsddb3.error will mean something... + +#---------------------------------------------------------------------- + + +class _DBWithCursor: + """ + A simple wrapper around DB that makes it look like the bsddbobject in + the old module. It uses a cursor as needed to provide DB traversal. 
+ """ + def __init__(self, db): + self.db = db + self.dbc = None + self.db.set_get_returns_none(0) + + def __del__(self): + self.close() + + def _checkCursor(self): + if self.dbc is None: + self.dbc = self.db.cursor() + + def _checkOpen(self): + if self.db is None: + raise error, "BSDDB object has already been closed" + + def isOpen(self): + return self.db is not None + + def __len__(self): + self._checkOpen() + return len(self.db) + + def __getitem__(self, key): + self._checkOpen() + return self.db[key] + + def __setitem__(self, key, value): + self._checkOpen() + self.db[key] = value + + def __delitem__(self, key): + self._checkOpen() + del self.db[key] + + def close(self): + if self.dbc is not None: + self.dbc.close() + v = 0 + if self.db is not None: + v = self.db.close() + self.dbc = None + self.db = None + return v + + def keys(self): + self._checkOpen() + return self.db.keys() + + def has_key(self, key): + self._checkOpen() + return self.db.has_key(key) + + def set_location(self, key): + self._checkOpen() + self._checkCursor() + return self.dbc.set(key) + + def next(self): + self._checkOpen() + self._checkCursor() + rv = self.dbc.next() + return rv + + def previous(self): + self._checkOpen() + self._checkCursor() + rv = self.dbc.prev() + return rv + + def first(self): + self._checkOpen() + self._checkCursor() + rv = self.dbc.first() + return rv + + def last(self): + self._checkOpen() + self._checkCursor() + rv = self.dbc.last() + return rv + + def sync(self): + self._checkOpen() + return self.db.sync() + + +#---------------------------------------------------------------------- +# Compatibility object factory functions + +def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None, + cachesize=None, lorder=None, hflags=0): + + flags = _checkflag(flag) + d = _db.DB() + d.set_flags(hflags) + if cachesize is not None: d.set_cachesize(0, cachesize) + if pgsize is not None: d.set_pagesize(pgsize) + if lorder is not None: d.set_lorder(lorder) + if ffactor is not None: d.set_h_ffactor(ffactor) + if nelem is not None: d.set_h_nelem(nelem) + d.open(file, _db.DB_HASH, flags, mode) + return _DBWithCursor(d) + +#---------------------------------------------------------------------- + +def btopen(file, flag='c', mode=0666, + btflags=0, cachesize=None, maxkeypage=None, minkeypage=None, + pgsize=None, lorder=None): + + flags = _checkflag(flag) + d = _db.DB() + if cachesize is not None: d.set_cachesize(0, cachesize) + if pgsize is not None: d.set_pagesize(pgsize) + if lorder is not None: d.set_lorder(lorder) + d.set_flags(btflags) + if minkeypage is not None: d.set_bt_minkey(minkeypage) + if maxkeypage is not None: d.set_bt_maxkey(maxkeypage) + d.open(file, _db.DB_BTREE, flags, mode) + return _DBWithCursor(d) + +#---------------------------------------------------------------------- + + +def rnopen(file, flag='c', mode=0666, + rnflags=0, cachesize=None, pgsize=None, lorder=None, + rlen=None, delim=None, source=None, pad=None): + + flags = _checkflag(flag) + d = _db.DB() + if cachesize is not None: d.set_cachesize(0, cachesize) + if pgsize is not None: d.set_pagesize(pgsize) + if lorder is not None: d.set_lorder(lorder) + d.set_flags(rnflags) + if delim is not None: d.set_re_delim(delim) + if rlen is not None: d.set_re_len(rlen) + if source is not None: d.set_re_source(source) + if pad is not None: d.set_re_pad(pad) + d.open(file, _db.DB_RECNO, flags, mode) + return _DBWithCursor(d) + +#---------------------------------------------------------------------- + + +def _checkflag(flag): 
+ if flag == 'r': + flags = _db.DB_RDONLY + elif flag == 'rw': + flags = 0 + elif flag == 'w': + flags = _db.DB_CREATE + elif flag == 'c': + flags = _db.DB_CREATE + elif flag == 'n': + flags = _db.DB_CREATE | _db.DB_TRUNCATE + else: + raise error, "flags should be one of 'r', 'w', 'c' or 'n'" + return flags | _db.DB_THREAD + +#---------------------------------------------------------------------- + + +# This is a silly little hack that allows apps to continue to use the +# DB_THREAD flag even on systems without threads without freaking out +# BerkeleyDB. +# +# This assumes that if Python was built with thread support then +# BerkeleyDB was too. + +try: + import thread + del thread +except ImportError: + _db.DB_THREAD = 0 + + +#---------------------------------------------------------------------- diff --git a/Lib/bsddb/db.py b/Lib/bsddb/db.py new file mode 100644 index 0000000..b4365d0 --- /dev/null +++ b/Lib/bsddb/db.py @@ -0,0 +1,44 @@ +#---------------------------------------------------------------------- +# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA +# and Andrew Kuchling. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# o Redistributions of source code must retain the above copyright +# notice, this list of conditions, and the disclaimer that follows. +# +# o Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions, and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# +# o Neither the name of Digital Creations nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS +# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL +# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. +#---------------------------------------------------------------------- + + +# This module is just a placeholder for possible future expansion, in +# case we ever want to augment the stuff in _db in any way. For now +# it just simply imports everything from _db. + +from _db import * +from _db import __version__ + +if version() < (3, 1, 0): + raise ImportError, "BerkeleyDB 3.x symbols not found. Perhaps python was statically linked with an older version?" diff --git a/Lib/bsddb/dbobj.py b/Lib/bsddb/dbobj.py new file mode 100644 index 0000000..9c3e90f --- /dev/null +++ b/Lib/bsddb/dbobj.py @@ -0,0 +1,178 @@ +#------------------------------------------------------------------------- +# This file contains real Python object wrappers for DB and DBEnv +# C "objects" that can be usefully subclassed. The previous SWIG +# based interface allowed this thanks to SWIG's shadow classes. +# -- Gregory P. 
Smith +#------------------------------------------------------------------------- +# +# (C) Copyright 2001 Autonomous Zone Industries +# +# License: This is free software. You may use this software for any +# purpose including modification/redistribution, so long as +# this header remains intact and that you do not claim any +# rights of ownership or authorship of this software. This +# software has been tested, but no warranty is expressed or +# implied. +# + +import db + + +class DBEnv: + def __init__(self, *args, **kwargs): + self._cobj = apply(db.DBEnv, args, kwargs) + + def close(self, *args, **kwargs): + return apply(self._cobj.close, args, kwargs) + def open(self, *args, **kwargs): + return apply(self._cobj.open, args, kwargs) + def remove(self, *args, **kwargs): + return apply(self._cobj.remove, args, kwargs) + def set_cachesize(self, *args, **kwargs): + return apply(self._cobj.set_cachesize, args, kwargs) + def set_data_dir(self, *args, **kwargs): + return apply(self._cobj.set_data_dir, args, kwargs) + def set_flags(self, *args, **kwargs): + return apply(self._cobj.set_flags, args, kwargs) + def set_lg_bsize(self, *args, **kwargs): + return apply(self._cobj.set_lg_bsize, args, kwargs) + def set_lg_dir(self, *args, **kwargs): + return apply(self._cobj.set_lg_dir, args, kwargs) + def set_lg_max(self, *args, **kwargs): + return apply(self._cobj.set_lg_max, args, kwargs) + def set_lk_detect(self, *args, **kwargs): + return apply(self._cobj.set_lk_detect, args, kwargs) + def set_lk_max(self, *args, **kwargs): + return apply(self._cobj.set_lk_max, args, kwargs) + def set_lk_max_locks(self, *args, **kwargs): + return apply(self._cobj.set_lk_max_locks, args, kwargs) + def set_lk_max_lockers(self, *args, **kwargs): + return apply(self._cobj.set_lk_max_lockers, args, kwargs) + def set_lk_max_objects(self, *args, **kwargs): + return apply(self._cobj.set_lk_max_objects, args, kwargs) + def set_mp_mmapsize(self, *args, **kwargs): + return apply(self._cobj.set_mp_mmapsize, args, kwargs) + def set_tmp_dir(self, *args, **kwargs): + return apply(self._cobj.set_tmp_dir, args, kwargs) + def txn_begin(self, *args, **kwargs): + return apply(self._cobj.txn_begin, args, kwargs) + def txn_checkpoint(self, *args, **kwargs): + return apply(self._cobj.txn_checkpoint, args, kwargs) + def txn_stat(self, *args, **kwargs): + return apply(self._cobj.txn_stat, args, kwargs) + def set_tx_max(self, *args, **kwargs): + return apply(self._cobj.set_tx_max, args, kwargs) + def lock_detect(self, *args, **kwargs): + return apply(self._cobj.lock_detect, args, kwargs) + def lock_get(self, *args, **kwargs): + return apply(self._cobj.lock_get, args, kwargs) + def lock_id(self, *args, **kwargs): + return apply(self._cobj.lock_id, args, kwargs) + def lock_put(self, *args, **kwargs): + return apply(self._cobj.lock_put, args, kwargs) + def lock_stat(self, *args, **kwargs): + return apply(self._cobj.lock_stat, args, kwargs) + def log_archive(self, *args, **kwargs): + return apply(self._cobj.log_archive, args, kwargs) + def set_get_returns_none(self, *args, **kwargs): + return apply(self._cobj.set_get_returns_none, args, kwargs) + + +class DB: + def __init__(self, dbenv, *args, **kwargs): + # give it the proper DBEnv C object that its expecting + self._cobj = apply(db.DB, (dbenv._cobj,) + args, kwargs) + + # TODO are there other dict methods that need to be overridden? 
+ def __len__(self): + return len(self._cobj) + def __getitem__(self, arg): + return self._cobj[arg] + def __setitem__(self, key, value): + self._cobj[key] = value + def __delitem__(self, arg): + del self._cobj[arg] + + def append(self, *args, **kwargs): + return apply(self._cobj.append, args, kwargs) + def associate(self, *args, **kwargs): + return apply(self._cobj.associate, args, kwargs) + def close(self, *args, **kwargs): + return apply(self._cobj.close, args, kwargs) + def consume(self, *args, **kwargs): + return apply(self._cobj.consume, args, kwargs) + def consume_wait(self, *args, **kwargs): + return apply(self._cobj.consume_wait, args, kwargs) + def cursor(self, *args, **kwargs): + return apply(self._cobj.cursor, args, kwargs) + def delete(self, *args, **kwargs): + return apply(self._cobj.delete, args, kwargs) + def fd(self, *args, **kwargs): + return apply(self._cobj.fd, args, kwargs) + def get(self, *args, **kwargs): + return apply(self._cobj.get, args, kwargs) + def get_both(self, *args, **kwargs): + return apply(self._cobj.get_both, args, kwargs) + def get_byteswapped(self, *args, **kwargs): + return apply(self._cobj.get_byteswapped, args, kwargs) + def get_size(self, *args, **kwargs): + return apply(self._cobj.get_size, args, kwargs) + def get_type(self, *args, **kwargs): + return apply(self._cobj.get_type, args, kwargs) + def join(self, *args, **kwargs): + return apply(self._cobj.join, args, kwargs) + def key_range(self, *args, **kwargs): + return apply(self._cobj.key_range, args, kwargs) + def has_key(self, *args, **kwargs): + return apply(self._cobj.has_key, args, kwargs) + def items(self, *args, **kwargs): + return apply(self._cobj.items, args, kwargs) + def keys(self, *args, **kwargs): + return apply(self._cobj.keys, args, kwargs) + def open(self, *args, **kwargs): + return apply(self._cobj.open, args, kwargs) + def put(self, *args, **kwargs): + return apply(self._cobj.put, args, kwargs) + def remove(self, *args, **kwargs): + return apply(self._cobj.remove, args, kwargs) + def rename(self, *args, **kwargs): + return apply(self._cobj.rename, args, kwargs) + def set_bt_minkey(self, *args, **kwargs): + return apply(self._cobj.set_bt_minkey, args, kwargs) + def set_cachesize(self, *args, **kwargs): + return apply(self._cobj.set_cachesize, args, kwargs) + def set_flags(self, *args, **kwargs): + return apply(self._cobj.set_flags, args, kwargs) + def set_h_ffactor(self, *args, **kwargs): + return apply(self._cobj.set_h_ffactor, args, kwargs) + def set_h_nelem(self, *args, **kwargs): + return apply(self._cobj.set_h_nelem, args, kwargs) + def set_lorder(self, *args, **kwargs): + return apply(self._cobj.set_lorder, args, kwargs) + def set_pagesize(self, *args, **kwargs): + return apply(self._cobj.set_pagesize, args, kwargs) + def set_re_delim(self, *args, **kwargs): + return apply(self._cobj.set_re_delim, args, kwargs) + def set_re_len(self, *args, **kwargs): + return apply(self._cobj.set_re_len, args, kwargs) + def set_re_pad(self, *args, **kwargs): + return apply(self._cobj.set_re_pad, args, kwargs) + def set_re_source(self, *args, **kwargs): + return apply(self._cobj.set_re_source, args, kwargs) + def set_q_extentsize(self, *args, **kwargs): + return apply(self._cobj.set_q_extentsize, args, kwargs) + def stat(self, *args, **kwargs): + return apply(self._cobj.stat, args, kwargs) + def sync(self, *args, **kwargs): + return apply(self._cobj.sync, args, kwargs) + def type(self, *args, **kwargs): + return apply(self._cobj.type, args, kwargs) + def upgrade(self, *args, **kwargs): + 
return apply(self._cobj.upgrade, args, kwargs) + def values(self, *args, **kwargs): + return apply(self._cobj.values, args, kwargs) + def verify(self, *args, **kwargs): + return apply(self._cobj.verify, args, kwargs) + def set_get_returns_none(self, *args, **kwargs): + return apply(self._cobj.set_get_returns_none, args, kwargs) + diff --git a/Lib/bsddb/dbrecio.py b/Lib/bsddb/dbrecio.py new file mode 100644 index 0000000..995dad7 --- /dev/null +++ b/Lib/bsddb/dbrecio.py @@ -0,0 +1,190 @@ + +""" +File-like objects that read from or write to a bsddb3 record. + +This implements (nearly) all stdio methods. + +f = DBRecIO(db, key, txn=None) +f.close() # explicitly release resources held +flag = f.isatty() # always false +pos = f.tell() # get current position +f.seek(pos) # set current position +f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF +buf = f.read() # read until EOF +buf = f.read(n) # read up to n bytes +f.truncate([size]) # truncate file at to at most size (default: current pos) +f.write(buf) # write at current position +f.writelines(list) # for line in list: f.write(line) + +Notes: +- fileno() is left unimplemented so that code which uses it triggers + an exception early. +- There's a simple test set (see end of this file) - not yet updated + for DBRecIO. +- readline() is not implemented yet. + + +From: + Itamar Shtull-Trauring <itamar@maxnm.com> +""" + +import errno +import string + +class DBRecIO: + def __init__(self, db, key, txn=None): + self.db = db + self.key = key + self.txn = txn + self.len = None + self.pos = 0 + self.closed = 0 + self.softspace = 0 + + def close(self): + if not self.closed: + self.closed = 1 + del self.db, self.txn + + def isatty(self): + if self.closed: + raise ValueError, "I/O operation on closed file" + return 0 + + def seek(self, pos, mode = 0): + if self.closed: + raise ValueError, "I/O operation on closed file" + if mode == 1: + pos = pos + self.pos + elif mode == 2: + pos = pos + self.len + self.pos = max(0, pos) + + def tell(self): + if self.closed: + raise ValueError, "I/O operation on closed file" + return self.pos + + def read(self, n = -1): + if self.closed: + raise ValueError, "I/O operation on closed file" + if n < 0: + newpos = self.len + else: + newpos = min(self.pos+n, self.len) + + dlen = newpos - self.pos + + r = self.db.get(key, txn=self.txn, dlen=dlen, doff=self.pos) + self.pos = newpos + return r + + __fixme = """ + def readline(self, length=None): + if self.closed: + raise ValueError, "I/O operation on closed file" + if self.buflist: + self.buf = self.buf + string.joinfields(self.buflist, '') + self.buflist = [] + i = string.find(self.buf, '\n', self.pos) + if i < 0: + newpos = self.len + else: + newpos = i+1 + if length is not None: + if self.pos + length < newpos: + newpos = self.pos + length + r = self.buf[self.pos:newpos] + self.pos = newpos + return r + + def readlines(self, sizehint = 0): + total = 0 + lines = [] + line = self.readline() + while line: + lines.append(line) + total += len(line) + if 0 < sizehint <= total: + break + line = self.readline() + return lines + """ + + def truncate(self, size=None): + if self.closed: + raise ValueError, "I/O operation on closed file" + if size is None: + size = self.pos + elif size < 0: + raise IOError(errno.EINVAL, + "Negative size not allowed") + elif size < self.pos: + self.pos = size + self.db.put(key, "", txn=self.txn, dlen=self.len-size, doff=size) + + def write(self, s): + if self.closed: + raise ValueError, "I/O operation on closed file" + if not s: return + 
if self.pos > self.len: + self.buflist.append('\0'*(self.pos - self.len)) + self.len = self.pos + newpos = self.pos + len(s) + self.db.put(key, s, txn=self.txn, dlen=len(s), doff=self.pos) + self.pos = newpos + + def writelines(self, list): + self.write(string.joinfields(list, '')) + + def flush(self): + if self.closed: + raise ValueError, "I/O operation on closed file" + + +""" +# A little test suite + +def _test(): + import sys + if sys.argv[1:]: + file = sys.argv[1] + else: + file = '/etc/passwd' + lines = open(file, 'r').readlines() + text = open(file, 'r').read() + f = StringIO() + for line in lines[:-2]: + f.write(line) + f.writelines(lines[-2:]) + if f.getvalue() != text: + raise RuntimeError, 'write failed' + length = f.tell() + print 'File length =', length + f.seek(len(lines[0])) + f.write(lines[1]) + f.seek(0) + print 'First line =', `f.readline()` + here = f.tell() + line = f.readline() + print 'Second line =', `line` + f.seek(-len(line), 1) + line2 = f.read(len(line)) + if line != line2: + raise RuntimeError, 'bad result after seek back' + f.seek(len(line2), 1) + list = f.readlines() + line = list[-1] + f.seek(f.tell() - len(line)) + line2 = f.read() + if line != line2: + raise RuntimeError, 'bad result after seek back from EOF' + print 'Read', len(list), 'more lines' + print 'File length =', f.tell() + if f.tell() != length: + raise RuntimeError, 'bad length' + f.close() + +if __name__ == '__main__': + _test() +""" diff --git a/Lib/bsddb/dbshelve.py b/Lib/bsddb/dbshelve.py new file mode 100644 index 0000000..ce4a466 --- /dev/null +++ b/Lib/bsddb/dbshelve.py @@ -0,0 +1,291 @@ +#!/bin/env python +#------------------------------------------------------------------------ +# Copyright (c) 1997-2001 by Total Control Software +# All Rights Reserved +#------------------------------------------------------------------------ +# +# Module Name: dbShelve.py +# +# Description: A reimplementation of the standard shelve.py that +# forces the use of cPickle, and DB. +# +# Creation Date: 11/3/97 3:39:04PM +# +# License: This is free software. You may use this software for any +# purpose including modification/redistribution, so long as +# this header remains intact and that you do not claim any +# rights of ownership or authorship of this software. This +# software has been tested, but no warranty is expressed or +# implied. +# +# 13-Dec-2000: Updated to be used with the new bsddb3 package. +# Added DBShelfCursor class. +# +#------------------------------------------------------------------------ + +""" +Manage shelves of pickled objects using bsddb3 database files for the +storage. +""" + +#------------------------------------------------------------------------ + +import cPickle +from bsddb3 import db + +#------------------------------------------------------------------------ + + +def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH, + dbenv=None, dbname=None): + """ + A simple factory function for compatibility with the standard + shleve.py module. 
It can be used like this, where key is a string + and data is a pickleable object: + + from bsddb3 import dbshelve + db = dbshelve.open(filename) + + db[key] = data + + db.close() + """ + if type(flags) == type(''): + sflag = flags + if sflag == 'r': + flags = db.DB_RDONLY + elif sflag == 'rw': + flags = 0 + elif sflag == 'w': + flags = db.DB_CREATE + elif sflag == 'c': + flags = db.DB_CREATE + elif sflag == 'n': + flags = db.DB_TRUNCATE | db.DB_CREATE + else: + raise error, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb3.db.DB_* flags" + + d = DBShelf(dbenv) + d.open(filename, dbname, filetype, flags, mode) + return d + +#--------------------------------------------------------------------------- + +class DBShelf: + """ + A shelf to hold pickled objects, built upon a bsddb3 DB object. It + automatically pickles/unpickles data objects going to/from the DB. + """ + def __init__(self, dbenv=None): + self.db = db.DB(dbenv) + self.binary = 1 + + + def __del__(self): + self.close() + + + def __getattr__(self, name): + """Many methods we can just pass through to the DB object. (See below)""" + return getattr(self.db, name) + + + #----------------------------------- + # Dictionary access methods + + def __len__(self): + return len(self.db) + + + def __getitem__(self, key): + data = self.db[key] + return cPickle.loads(data) + + + def __setitem__(self, key, value): + data = cPickle.dumps(value, self.binary) + self.db[key] = data + + + def __delitem__(self, key): + del self.db[key] + + + def keys(self, txn=None): + if txn != None: + return self.db.keys(txn) + else: + return self.db.keys() + + + def items(self, txn=None): + if txn != None: + items = self.db.items(txn) + else: + items = self.db.items() + newitems = [] + + for k, v in items: + newitems.append( (k, cPickle.loads(v)) ) + return newitems + + def values(self, txn=None): + if txn != None: + values = self.db.values(txn) + else: + values = self.db.values() + + return map(cPickle.loads, values) + + #----------------------------------- + # Other methods + + def append(self, value, txn=None): + data = cPickle.dumps(value, self.binary) + return self.db.append(data, txn) + + + def associate(self, secondaryDB, callback, flags=0): + def _shelf_callback(priKey, priData, realCallback=callback): + data = cPickle.loads(priData) + return realCallback(priKey, data) + return self.db.associate(secondaryDB, _shelf_callback, flags) + + + #def get(self, key, default=None, txn=None, flags=0): + def get(self, *args, **kw): + # We do it with *args and **kw so if the default value wasn't + # given nothing is passed to the extension module. That way + # an exception can be raised if set_get_returns_none is turned + # off. + data = apply(self.db.get, args, kw) + try: + return cPickle.loads(data) + except (TypeError, cPickle.UnpicklingError): + return data # we may be getting the default value, or None, + # so it doesn't need unpickled. 
+ + def get_both(self, key, value, txn=None, flags=0): + data = cPickle.dumps(value, self.binary) + data = self.db.get(key, data, txn, flags) + return cPickle.loads(data) + + + def cursor(self, txn=None, flags=0): + c = DBShelfCursor(self.db.cursor(txn, flags)) + c.binary = self.binary + return c + + + def put(self, key, value, txn=None, flags=0): + data = cPickle.dumps(value, self.binary) + return self.db.put(key, data, txn, flags) + + + def join(self, cursorList, flags=0): + raise NotImplementedError + + + #---------------------------------------------- + # Methods allowed to pass-through to self.db + # + # close, delete, fd, get_byteswapped, get_type, has_key, + # key_range, open, remove, rename, stat, sync, + # upgrade, verify, and all set_* methods. + + +#--------------------------------------------------------------------------- + +class DBShelfCursor: + """ + """ + def __init__(self, cursor): + self.dbc = cursor + + def __del__(self): + self.close() + + + def __getattr__(self, name): + """Some methods we can just pass through to the cursor object. (See below)""" + return getattr(self.dbc, name) + + + #---------------------------------------------- + + def dup(self, flags=0): + return DBShelfCursor(self.dbc.dup(flags)) + + + def put(self, key, value, flags=0): + data = cPickle.dumps(value, self.binary) + return self.dbc.put(key, data, flags) + + + def get(self, *args): + count = len(args) # a method overloading hack + method = getattr(self, 'get_%d' % count) + apply(method, args) + + def get_1(self, flags): + rec = self.dbc.get(flags) + return self._extract(rec) + + def get_2(self, key, flags): + rec = self.dbc.get(key, flags) + return self._extract(rec) + + def get_3(self, key, value, flags): + data = cPickle.dumps(value, self.binary) + rec = self.dbc.get(key, flags) + return self._extract(rec) + + + def current(self, flags=0): return self.get_1(flags|db.DB_CURRENT) + def first(self, flags=0): return self.get_1(flags|db.DB_FIRST) + def last(self, flags=0): return self.get_1(flags|db.DB_LAST) + def next(self, flags=0): return self.get_1(flags|db.DB_NEXT) + def prev(self, flags=0): return self.get_1(flags|db.DB_PREV) + def consume(self, flags=0): return self.get_1(flags|db.DB_CONSUME) + def next_dup(self, flags=0): return self.get_1(flags|db.DB_NEXT_DUP) + def next_nodup(self, flags=0): return self.get_1(flags|db.DB_NEXT_NODUP) + def prev_nodup(self, flags=0): return self.get_1(flags|db.DB_PREV_NODUP) + + + def get_both(self, key, value, flags=0): + data = cPickle.dumps(value, self.binary) + rec = self.dbc.get_both(key, flags) + return self._extract(rec) + + + def set(self, key, flags=0): + rec = self.dbc.set(key, flags) + return self._extract(rec) + + def set_range(self, key, flags=0): + rec = self.dbc.set_range(key, flags) + return self._extract(rec) + + def set_recno(self, recno, flags=0): + rec = self.dbc.set_recno(recno, flags) + return self._extract(rec) + + set_both = get_both + + def _extract(self, rec): + if rec is None: + return None + else: + key, data = rec + return key, cPickle.loads(data) + + #---------------------------------------------- + # Methods allowed to pass-through to self.dbc + # + # close, count, delete, get_recno, join_item + + +#--------------------------------------------------------------------------- + + + diff --git a/Lib/bsddb/dbtables.py b/Lib/bsddb/dbtables.py new file mode 100644 index 0000000..4e93451 --- /dev/null +++ b/Lib/bsddb/dbtables.py @@ -0,0 +1,629 @@ +#----------------------------------------------------------------------- +# +# Copyright 
(C) 2000, 2001 by Autonomous Zone Industries +# +# License: This is free software. You may use this software for any +# purpose including modification/redistribution, so long as +# this header remains intact and that you do not claim any +# rights of ownership or authorship of this software. This +# software has been tested, but no warranty is expressed or +# implied. +# +# -- Gregory P. Smith <greg@electricrain.com> + +# This provides a simple database table interface built on top of +# the Python BerkeleyDB 3 interface. +# +_cvsid = '$Id$' + +import string +import sys +try: + import cPickle + pickle = cPickle +except ImportError: + import pickle +import whrandom +import xdrlib +import re +import copy + +from bsddb3.db import * + + +class TableDBError(StandardError): pass +class TableAlreadyExists(TableDBError): pass + + +class Cond: + """This condition matches everything""" + def __call__(self, s): + return 1 + +class ExactCond(Cond): + """Acts as an exact match condition function""" + def __init__(self, strtomatch): + self.strtomatch = strtomatch + def __call__(self, s): + return s == self.strtomatch + +class PrefixCond(Cond): + """Acts as a condition function for matching a string prefix""" + def __init__(self, prefix): + self.prefix = prefix + def __call__(self, s): + return s[:len(self.prefix)] == self.prefix + +class LikeCond(Cond): + """ + Acts as a function that will match using an SQL 'LIKE' style + string. Case insensitive and % signs are wild cards. + This isn't perfect but it should work for the simple common cases. + """ + def __init__(self, likestr, re_flags=re.IGNORECASE): + # escape python re characters + chars_to_escape = '.*+()[]?' + for char in chars_to_escape : + likestr = string.replace(likestr, char, '\\'+char) + # convert %s to wildcards + self.likestr = string.replace(likestr, '%', '.*') + self.re = re.compile('^'+self.likestr+'$', re_flags) + def __call__(self, s): + return self.re.match(s) + +# +# keys used to store database metadata +# +_table_names_key = '__TABLE_NAMES__' # list of the tables in this db +_columns = '._COLUMNS__' # table_name+this key contains a list of columns +def _columns_key(table) : return table + _columns + +# +# these keys are found within table sub databases +# +_data = '._DATA_.' # this+column+this+rowid key contains table data +_rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each + # row in the table. (no data is stored) +_rowid_str_len = 8 # length in bytes of the unique rowid strings +def _data_key(table, col, rowid) : return table + _data + col + _data + rowid +def _search_col_data_key(table, col) : return table + _data + col + _data +def _search_all_data_key(table) : return table + _data +def _rowid_key(table, rowid) : return table + _rowid + rowid + _rowid +def _search_rowid_key(table) : return table + _rowid + +def contains_metastrings(s) : + """Verify that the given string does not contain any + metadata strings that might interfere with dbtables database operation. + """ + if string.find(s, _table_names_key) >= 0 or \ + string.find(s, _columns) >= 0 or \ + string.find(s, _data) >= 0 or \ + string.find(s, _rowid) >= 0 : + return 1 + else : + return 0 + + +class bsdTableDB : + def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600, recover=0, dbflags=0) : + """bsdTableDB.open(filename, dbhome, create=0, truncate=0, mode=0600) + Open database name in the dbhome BerkeleyDB directory. + Use keyword arguments when calling this constructor. 
+ """ + myflags = DB_THREAD + if create : + myflags = myflags | DB_CREATE + flagsforenv = DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | dbflags + if recover : + flagsforenv = flagsforenv | DB_RECOVER + self.env = DBEnv() + self.env.set_lk_detect(DB_LOCK_DEFAULT) # enable auto deadlock avoidance + self.env.open(dbhome, myflags | flagsforenv) + if truncate : + myflags = myflags | DB_TRUNCATE + self.db = DB(self.env) + self.db.set_flags(DB_DUP) # allow duplicate entries [warning: be careful w/ metadata] + self.db.open(filename, DB_BTREE, myflags, mode) + + self.dbfilename = filename + + # Initialize the table names list if this is a new database + if not self.db.has_key(_table_names_key) : + self.db.put(_table_names_key, pickle.dumps([], 1)) + + # TODO verify more of the database's metadata? + + self.__tablecolumns = {} + + def __del__(self): + self.close() + + def close(self): + if self.db is not None: + self.db.close() + self.db = None + if self.env is not None: + self.env.close() + self.env = None + + def checkpoint(self, mins=0): + try: + self.env.txn_checkpoint(mins) + except DBIncompleteError: + pass + + def sync(self): + try: + self.db.sync() + except DBIncompleteError: + pass + + def _db_print(self) : + """Print the database to stdout for debugging""" + print "******** Printing raw database for debugging ********" + cur = self.db.cursor() + try: + key, data = cur.first() + while 1 : + print `{key: data}` + next = cur.next() + if next: + key, data = next + else: + cur.close() + return + except DBNotFoundError: + cur.close() + + + def CreateTable(self, table, columns) : + """CreateTable(table, columns) - Create a new table in the database + raises TableDBError if it already exists or for other DB errors. + """ + assert type(columns) == type([]) + txn = None + try: + # checking sanity of the table and column names here on + # table creation will prevent problems elsewhere. + if contains_metastrings(table) : + raise ValueError, "bad table name: contains reserved metastrings" + for column in columns : + if contains_metastrings(column) : + raise ValueError, "bad column name: contains reserved metastrings" + + columnlist_key = _columns_key(table) + if self.db.has_key(columnlist_key) : + raise TableAlreadyExists, "table already exists" + + txn = self.env.txn_begin() + # store the table's column info + self.db.put(columnlist_key, pickle.dumps(columns, 1), txn=txn) + + # add the table name to the tablelist + tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn, flags=DB_RMW)) + tablelist.append(table) + self.db.delete(_table_names_key, txn) # delete 1st, incase we opened with DB_DUP + self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn) + + txn.commit() + txn = None + + except DBError, dberror: + if txn : + txn.abort() + raise TableDBError, dberror[1] + + + def ListTableColumns(self, table): + """Return a list of columns in the given table. [] if the table doesn't exist. 
+ """ + assert type(table) == type('') + if contains_metastrings(table) : + raise ValueError, "bad table name: contains reserved metastrings" + + columnlist_key = _columns_key(table) + if not self.db.has_key(columnlist_key): + return [] + pickledcolumnlist = self.db.get(columnlist_key) + if pickledcolumnlist: + return pickle.loads(pickledcolumnlist) + else: + return [] + + def ListTables(self): + """Return a list of tables in this database.""" + pickledtablelist = self.db.get(_table_names_key) + if pickledtablelist: + return pickle.loads(pickledtablelist) + else: + return [] + + def CreateOrExtendTable(self, table, columns): + """CreateOrExtendTable(table, columns) - Create a new table in the database. + If a table of this name already exists, extend it to have any + additional columns present in the given list as well as + all of its current columns. + """ + assert type(columns) == type([]) + try: + self.CreateTable(table, columns) + except TableAlreadyExists: + # the table already existed, add any new columns + txn = None + try: + columnlist_key = _columns_key(table) + txn = self.env.txn_begin() + + # load the current column list + oldcolumnlist = pickle.loads(self.db.get(columnlist_key, txn=txn, flags=DB_RMW)) + # create a hash table for fast lookups of column names in the loop below + oldcolumnhash = {} + for c in oldcolumnlist: + oldcolumnhash[c] = c + + # create a new column list containing both the old and new column names + newcolumnlist = copy.copy(oldcolumnlist) + for c in columns: + if not oldcolumnhash.has_key(c): + newcolumnlist.append(c) + + # store the table's new extended column list + if newcolumnlist != oldcolumnlist : + # delete the old one first since we opened with DB_DUP + self.db.delete(columnlist_key, txn) + self.db.put(columnlist_key, pickle.dumps(newcolumnlist, 1), txn=txn) + + txn.commit() + txn = None + + self.__load_column_info(table) + except DBError, dberror: + if txn: + txn.abort() + raise TableDBError, dberror[1] + + + def __load_column_info(self, table) : + """initialize the self.__tablecolumns dict""" + # check the column names + try: + tcolpickles = self.db.get(_columns_key(table)) + except DBNotFoundError: + raise TableDBError, "unknown table: " + `table` + if not tcolpickles: + raise TableDBError, "unknown table: " + `table` + self.__tablecolumns[table] = pickle.loads(tcolpickles) + + def __new_rowid(self, table, txn=None) : + """Create a new unique row identifier""" + unique = 0 + while not unique : + # Generate a random 64-bit row ID string + # (note: this code has <64 bits of randomness + # but it's plenty for our database id needs!) + p = xdrlib.Packer() + p.pack_int(int(whrandom.random()*2147483647)) + p.pack_int(int(whrandom.random()*2147483647)) + newid = p.get_buffer() + + # Guarantee uniqueness by adding this key to the database + try: + self.db.put(_rowid_key(table, newid), None, txn=txn, flags=DB_NOOVERWRITE) + except DBKeyExistsError: + pass + else: + unique = 1 + + return newid + + + def Insert(self, table, rowdict) : + """Insert(table, datadict) - Insert a new row into the table + using the keys+values from rowdict as the column values. 
+ """ + txn = None + try: + if not self.db.has_key(_columns_key(table)) : + raise TableDBError, "unknown table" + + # check the validity of each column name + if not self.__tablecolumns.has_key(table) : + self.__load_column_info(table) + for column in rowdict.keys() : + if not self.__tablecolumns[table].count(column) : + raise TableDBError, "unknown column: "+`column` + + # get a unique row identifier for this row + rowid = self.__new_rowid(table) + + txn = self.env.txn_begin() + + # insert the row values into the table database + for column, dataitem in rowdict.items() : + # store the value + self.db.put(_data_key(table, column, rowid), dataitem, txn=txn) + + txn.commit() + txn = None + + except DBError, dberror: + if txn : + txn.abort() + self.db.delete(_rowid_key(table, rowid)) + raise TableDBError, dberror[1] + + + def Modify(self, table, conditions={}, mappings={}) : + """Modify(table, conditions) - Modify in rows matching 'conditions' + using mapping functions in 'mappings' + * conditions is a dictionary keyed on column names + containing condition functions expecting the data string as an + argument and returning a boolean. + * mappings is a dictionary keyed on column names containint condition + functions expecting the data string as an argument and returning the + new string for that column. + """ + try: + matching_rowids = self.__Select(table, [], conditions) + + # modify only requested columns + columns = mappings.keys() + for rowid in matching_rowids.keys() : + txn = None + try: + for column in columns : + txn = self.env.txn_begin() + # modify the requested column + try: + dataitem = self.db.get(_data_key(table, column, rowid), txn) + self.db.delete(_data_key(table, column, rowid), txn) + except DBNotFoundError: + dataitem = None # XXXXXXX row key somehow didn't exist, assume no error + dataitem = mappings[column](dataitem) + if dataitem <> None: + self.db.put(_data_key(table, column, rowid), dataitem, txn=txn) + txn.commit() + txn = None + + except DBError, dberror: + if txn : + txn.abort() + raise + + except DBError, dberror: + raise TableDBError, dberror[1] + + def Delete(self, table, conditions={}) : + """Delete(table, conditions) - Delete items matching the given + conditions from the table. + * conditions is a dictionary keyed on column names + containing condition functions expecting the data string as an + argument and returning a boolean. + """ + try: + matching_rowids = self.__Select(table, [], conditions) + + # delete row data from all columns + columns = self.__tablecolumns[table] + for rowid in matching_rowids.keys() : + txn = None + try: + txn = self.env.txn_begin() + for column in columns : + # delete the data key + try: + self.db.delete(_data_key(table, column, rowid), txn) + except DBNotFoundError: + pass # XXXXXXX column may not exist, assume no error + + try: + self.db.delete(_rowid_key(table, rowid), txn) + except DBNotFoundError: + pass # XXXXXXX row key somehow didn't exist, assume no error + txn.commit() + txn = None + except DBError, dberror: + if txn : + txn.abort() + raise + + except DBError, dberror: + raise TableDBError, dberror[1] + + + def Select(self, table, columns, conditions={}) : + """Select(table, conditions) - retrieve specific row data + Returns a list of row column->value mapping dictionaries. + * columns is a list of which column data to return. If + columns is None, all columns will be returned. 
+ * conditions is a dictionary keyed on column names + containing callable conditions expecting the data string as an + argument and returning a boolean. + """ + try: + if not self.__tablecolumns.has_key(table) : + self.__load_column_info(table) + if columns is None : + columns = self.__tablecolumns[table] + matching_rowids = self.__Select(table, columns, conditions) + except DBError, dberror: + raise TableDBError, dberror[1] + + # return the matches as a list of dictionaries + return matching_rowids.values() + + + def __Select(self, table, columns, conditions) : + """__Select() - Used to implement Select and Delete (above) + Returns a dictionary keyed on rowids containing dicts + holding the row data for columns listed in the columns param + that match the given conditions. + * conditions is a dictionary keyed on column names + containing callable conditions expecting the data string as an + argument and returning a boolean. + """ + # check the validity of each column name + if not self.__tablecolumns.has_key(table) : + self.__load_column_info(table) + if columns is None : + columns = self.tablecolumns[table] + for column in (columns + conditions.keys()) : + if not self.__tablecolumns[table].count(column) : + raise TableDBError, "unknown column: "+`column` + + # keyed on rows that match so far, containings dicts keyed on + # column names containing the data for that row and column. + matching_rowids = {} + + rejected_rowids = {} # keys are rowids that do not match + + # attempt to sort the conditions in such a way as to minimize full column lookups + def cmp_conditions(atuple, btuple): + a = atuple[1] + b = btuple[1] + if type(a) == type(b) : + if isinstance(a, PrefixCond) and isinstance(b, PrefixCond): + return cmp(len(b.prefix), len(a.prefix)) # longest prefix first + if isinstance(a, LikeCond) and isinstance(b, LikeCond): + return cmp(len(b.likestr), len(a.likestr)) # longest likestr first + return 0 + if isinstance(a, ExactCond): + return -1 + if isinstance(b, ExactCond): + return 1 + if isinstance(a, PrefixCond): + return -1 + if isinstance(b, PrefixCond): + return 1 + # leave all unknown condition callables alone as equals + return 0 + + conditionlist = conditions.items() + conditionlist.sort(cmp_conditions) + + # Apply conditions to column data to find what we want + cur = self.db.cursor() + column_num = -1 + for column, condition in conditionlist : + column_num = column_num + 1 + searchkey = _search_col_data_key(table, column) + # speedup: don't linear search columns within loop + if column in columns : + savethiscolumndata = 1 # save the data for return + else : + savethiscolumndata = 0 # data only used for selection + + try: + key, data = cur.set_range(searchkey) + while key[:len(searchkey)] == searchkey : + # extract the rowid from the key + rowid = key[-_rowid_str_len:] + + if not rejected_rowids.has_key(rowid) : + # if no condition was specified or the condition + # succeeds, add row to our match list. 
+ if not condition or condition(data) : + # only create new entries in matcing_rowids on + # the first pass, otherwise reject the + # rowid as it must not have matched + # the previous passes + if column_num == 0 : + if not matching_rowids.has_key(rowid) : + matching_rowids[rowid] = {} + if savethiscolumndata : + matching_rowids[rowid][column] = data + else : + rejected_rowids[rowid] = rowid + else : + if matching_rowids.has_key(rowid) : + del matching_rowids[rowid] + rejected_rowids[rowid] = rowid + + key, data = cur.next() + + except DBError, dberror: + if dberror[0] != DB_NOTFOUND : + raise + continue + + cur.close() + + # we're done selecting rows, garbage collect the reject list + del rejected_rowids + + # extract any remaining desired column data from the + # database for the matching rows. + if len(columns) > 0 : + for rowid, rowdata in matching_rowids.items() : + for column in columns : + if rowdata.has_key(column) : + continue + try: + rowdata[column] = self.db.get(_data_key(table, column, rowid)) + except DBError, dberror: + if dberror[0] != DB_NOTFOUND : + raise + rowdata[column] = None + + # return the matches + return matching_rowids + + + def Drop(self, table) : + """Remove an entire table from the database + """ + txn = None + try: + txn = self.env.txn_begin() + + # delete the column list + self.db.delete(_columns_key(table), txn) + + cur = self.db.cursor(txn) + + # delete all keys containing this tables column and row info + table_key = _search_all_data_key(table) + while 1 : + try: + key, data = cur.set_range(table_key) + except DBNotFoundError: + break + # only delete items in this table + if key[:len(table_key)] != table_key : + break + cur.delete() + + # delete all rowids used by this table + table_key = _search_rowid_key(table) + while 1 : + try: + key, data = cur.set_range(table_key) + except DBNotFoundError: + break + # only delete items in this table + if key[:len(table_key)] != table_key : + break + cur.delete() + + cur.close() + + # delete the tablename from the table name list + tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn, flags=DB_RMW)) + try: + tablelist.remove(table) + except ValueError: + pass # hmm, it wasn't there, oh well, that's what we want. + self.db.delete(_table_names_key, txn) # delete 1st, incase we opened with DB_DUP + self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn) + + txn.commit() + txn = None + + if self.__tablecolumns.has_key(table) : + del self.__tablecolumns[table] + + except DBError, dberror: + if txn : + txn.abort() + raise TableDBError, dberror[1] + diff --git a/Lib/bsddb/dbutils.py b/Lib/bsddb/dbutils.py new file mode 100644 index 0000000..fe08407 --- /dev/null +++ b/Lib/bsddb/dbutils.py @@ -0,0 +1,69 @@ +#------------------------------------------------------------------------ +# +# In my performance tests, using this (as in dbtest.py test4) is +# slightly slower than simply compiling _db.c with MYDB_THREAD +# undefined to prevent multithreading support in the C module. +# Using NoDeadlockDb also prevent deadlocks from mutliple processes +# accessing the same database. +# +# Copyright (C) 2000 Autonomous Zone Industries +# +# License: This is free software. You may use this software for any +# purpose including modification/redistribution, so long as +# this header remains intact and that you do not claim any +# rights of ownership or authorship of this software. This +# software has been tested, but no warranty is expressed or +# implied. +# +# Author: Gregory P. 
Smith <greg@electricrain.com> +# +# Note: I don't know how useful this is in reality since when a +# DBDeadlockError happens the current transaction is supposed to be +# aborted. If it doesn't then when the operation is attempted again +# the deadlock is still happening... +# --Robin +# +#------------------------------------------------------------------------ + + +# +# import the time.sleep function in a namespace safe way to allow +# "from bsddb3.db import *" +# +from time import sleep +_sleep = sleep +del sleep + +import _db + +_deadlock_MinSleepTime = 1.0/64 # always sleep at least N seconds between retrys +_deadlock_MaxSleepTime = 1.0 # never sleep more than N seconds between retrys + + +def DeadlockWrap(function, *_args, **_kwargs): + """DeadlockWrap(function, *_args, **_kwargs) - automatically retries + function in case of a database deadlock. + + This is a DeadlockWrapper method which DB calls can be made using to + preform infinite retrys with sleeps in between when a DBLockDeadlockError + exception is raised in a database call: + + d = DB(...) + d.open(...) + DeadlockWrap(d.put, "foo", data="bar") # set key "foo" to "bar" + """ + sleeptime = _deadlock_MinSleepTime + while (1) : + try: + return apply(function, _args, _kwargs) + except _db.DBLockDeadlockError: + print 'DeadlockWrap sleeping ', sleeptime + _sleep(sleeptime) + # exponential backoff in the sleep time + sleeptime = sleeptime * 2 + if sleeptime > _deadlock_MaxSleepTime : + sleeptime = _deadlock_MaxSleepTime + + +#------------------------------------------------------------------------ + |
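For reference, a hedged sketch of the dbtables API added above, again in Python 2-era syntax (the import follows the `bsddb3` naming the file itself uses; the table, columns, and paths are hypothetical):

```python
from bsddb3 import dbtables  # dbtables.py imports from the bsddb3 namespace

# dbhome must be an existing directory; create=1 creates the env/db files.
tdb = dbtables.bsdTableDB(filename='tables.db', dbhome='/tmp/tabledb', create=1)
tdb.CreateTable('frobs', ['name', 'color'])
tdb.Insert('frobs', {'name': 'widget', 'color': 'blue'})
# ExactCond matches a column value exactly; Select returns a list of
# {column: value} dictionaries for the requested columns.
rows = tdb.Select('frobs', ['name'],
                  conditions={'color': dbtables.ExactCond('blue')})
print rows  # e.g. [{'name': 'widget'}]
tdb.close()
```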