"""A dumb and slow but simple dbm clone.

For a database named spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).

XXX TO DO:

- seems to contain a bug when updating...

- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)

- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)

- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)

- support opening for read-only (flag = 'r')

"""

# Bind os and the builtin open() to private, underscore-prefixed names so
# that "from dumbdbm import *" exports neither, and so the module-level
# open() defined below does not lose access to the builtin.
_os = __import__('os')
import __builtin__

_open = __builtin__.open

# Values in the data file are aligned to multiples of this block size.
_BLOCKSIZE = 512

class _Database:

	def __init__(self, file):
		self._dirfile = file + '.dir'
		self._datfile = file + '.dat'
		self._bakfile = file + '.bak'
		# Mod by Jack: create data file if needed
		try:
			f = _open(self._datfile, 'r')
		except IOError:
			f = _open(self._datfile, 'w')
		f.close()
		self._update()
	
	def _update(self):
		# (Re)load the in-memory index from the directory file.  Each
		# line holds a repr()'d (key, (pos, siz)) pair as written by
		# _commit or _addkey; no directory file means an empty database.
		self._index = {}
		try:
			f = _open(self._dirfile)
		except IOError:
			pass
		else:
			while 1:
				line = f.readline()
				if not line: break
				key, (pos, siz) = eval(line)
				self._index[key] = (pos, siz)
			f.close()

	def _commit(self):
		# Rewrite the directory file from the in-memory index, keeping
		# the previous directory file as the backup.
		try: _os.unlink(self._bakfile)
		except _os.error: pass
		try: _os.rename(self._dirfile, self._bakfile)
		except _os.error: pass
		f = _open(self._dirfile, 'w')
		for key, (pos, siz) in self._index.items():
			f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
		f.close()
	
	def __getitem__(self, key):
		pos, siz = self._index[key]	# may raise KeyError
		f = _open(self._datfile, 'rb')
		f.seek(pos)
		dat = f.read(siz)
		f.close()
		return dat
	
	def _addval(self, val):
		# Append val to the data file, padded with NUL bytes so that it
		# starts on a _BLOCKSIZE boundary, and return its (pos, siz).
		f = _open(self._datfile, 'rb+')
		f.seek(0, 2)	# seek to end of file
		pos = f.tell()
## Does not work under MW compiler
##		pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
##		f.seek(pos)
		npos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
		f.write('\0'*(npos-pos))
		pos = npos

		f.write(val)
		f.close()
		return (pos, len(val))
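	# Worked example with hypothetical offsets: if the data file is 520
	# bytes long, npos = ((520 + 511) / 512) * 512 = 1024, so 504 bytes
	# of NUL padding are written before the new value.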
	
	def _setval(self, pos, val):
		# Overwrite the value at offset pos in the data file; the caller
		# has checked that the new value fits in the old blocks.
		f = _open(self._datfile, 'rb+')
		f.seek(pos)
		f.write(val)
		f.close()
		return (pos, len(val))
	
	def _addkey(self, key, (pos, siz)):
		# Record the key in the in-memory index and append one line to
		# the directory file.  Duplicate lines for a key are harmless:
		# the last one wins when _update re-reads the file.
		self._index[key] = (pos, siz)
		f = _open(self._dirfile, 'a')
		f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
		f.close()
	
	def __setitem__(self, key, val):
		if not type(key) == type('') == type(val):
			raise TypeError, "keys and values must be strings"
		if not self._index.has_key(key):
			(pos, siz) = self._addval(val)
			self._addkey(key, (pos, siz))
		else:
			# Overwrite in place if the new value fits in the blocks
			# occupied by the old one; otherwise append it to the data
			# file (the old space is never reclaimed).
			pos, siz = self._index[key]
			oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
			newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
			if newblocks <= oldblocks:
				pos, siz = self._setval(pos, val)
				self._index[key] = pos, siz
			else:
				pos, siz = self._addval(val)
				self._index[key] = pos, siz
			self._addkey(key, (pos, siz))
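	# Worked example with hypothetical sizes: a 700-byte value occupies
	# (700 + 511) / 512 = 2 blocks, i.e. 1024 bytes; a replacement of up
	# to 1024 bytes is written in place, while a 1025-byte replacement
	# is appended to the end of the data file instead.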
	
	def __delitem__(self, key):
		# Deleting a key rewrites the whole directory file; the space
		# its value occupied in the data file is not reclaimed.
		del self._index[key]
		self._commit()
	
	def keys(self):
		return self._index.keys()
	
	def has_key(self, key):
		return self._index.has_key(key)
	
	def __len__(self):
		return len(self._index)
	
	def close(self):
		self._index = None
		self._datfile = self._dirfile = self._bakfile = None


def open(file, flag = None, mode = None):
	# The flag and mode arguments are currently ignored; the database is
	# always opened (and created if necessary) for reading and writing.
	return _Database(file)