path: root/Demo/stdwin/ibrowse/ifile.py

# Tools for info file processing.

# XXX Need to be more careful with reading ahead when searching for nodes.


import regexp
import string


# Exported exceptions.
#
NoSuchFile = 'no such file'
NoSuchNode = 'no such node'


# The search path for info files; this is site-specific.
# Directory names should end in a partname delimiter,
# so they can simply be concatenated to a relative pathname.
#
#INFOPATH = ['', ':Info.Ibrowse:', ':Info:']	# Mac
INFOPATH = ['', '/usr/local/emacs/info/']	# X11 on UNIX


# Tunable constants.
#
BLOCKSIZE = 512			# Qty to align reads to, if possible
FUZZ = 2*BLOCKSIZE		# Qty to back-up before searching for a node
CHUNKSIZE = 4*BLOCKSIZE		# Qty to read at once when reading lots of data


# Regular expressions used.
# Note that this relies on Python leaving unrecognized backslash
# escapes in string literals untouched, so that regexp.compile can see them!
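# (\037 is the ASCII "unit separator" that precedes each node header in
# an Info file, optionally followed by a \014 form feed; findheader and
# findescape below key on that separator.)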
#
findheader = regexp.compile('\037\014?\n(.*\n)').match
findescape = regexp.compile('\037').match
parseheader = regexp.compile('[nN]ode:[ \t]*([^\t,\n]*)').match
findfirstline = regexp.compile('^.*\n').match
findnode = regexp.compile('[nN]ode:[ \t]*([^\t,\n]*)').match
findprev = regexp.compile('[pP]rev[ious]*:[ \t]*([^\t,\n]*)').match
findnext = regexp.compile('[nN]ext:[ \t]*([^\t,\n]*)').match
findup = regexp.compile('[uU]p:[ \t]*([^\t,\n]*)').match
findmenu = regexp.compile('^\* [mM]enu:').match
findmenuitem = regexp.compile( \
	'^\* ([^:]+):[ \t]*(:|\([^\t]*\)[^\t,\n.]*|[^:(][^\t,\n.]*)').match
findfootnote = regexp.compile( \
	'\*[nN]ote ([^:]+):[ \t]*(:|[^:][^\t,\n.]*)').match
parsenoderef = regexp.compile('^\((.*)\)(.*)$').match


# Get a node and all information pertaining to it.
# This doesn't work if there is an indirect tag table,
# and in general you are better off using icache.get_node() instead.
# Functions get_whole_file() and get_file_node() provide part of the
# functionality used by icache.
# Raise NoSuchFile or NoSuchNode as appropriate.
#
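# A rough usage sketch (the return value layout is read off the code
# below; the file name is just the test file used at the bottom of
# this module):
#   file, node, (prev, next, up), menu, footnotes, text = \
#	get_node('', '(texinfo-1)Top')
#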
def get_node(curfile, ref):
	file, node = parse_ref(curfile, ref)
	if node == '*':
		return get_whole_file(file)
	else:
		return get_file_node(file, 0, node)
#
def get_whole_file(file):
	f = try_open(file) # May raise NoSuchFile
	text = f.read()
	header, menu, footnotes = ('', '', ''), [], []
	return file, '*', header, menu, footnotes, text
#
def get_file_node(file, offset, node):
	f = try_open(file) # May raise NoSuchFile
	text = find_node(f, offset, node) # May raise NoSuchNode
	node, header, menu, footnotes = analyze_node(text)
	return file, node, header, menu, footnotes, text


# Parse a node reference into a file (possibly default) and node name.
# Possible reference formats are: "NODE", "(FILE)", "(FILE)NODE".
# Default file is the curfile argument; default node is Top.
# A node value of '*' is a special case: the whole file should
# be interpreted (by the caller!) as a single node.
#
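# Examples of the expected results (added for illustration, derived
# from the parsenoderef pattern above):
#   parse_ref('dir', '(emacs)Buffers')  ==>  ('emacs', 'Buffers')
#   parse_ref('dir', 'Top')             ==>  ('dir', 'Top')
#   parse_ref('dir', '(emacs)')         ==>  ('emacs', 'Top')
#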
def parse_ref(curfile, ref):
	match = parsenoderef(ref)
	if not match:
		file, node = curfile, ref
	else:
		(a, b), (a1, b1), (a2, b2) = match
		file, node = ref[a1:b1], ref[a2:b2]
	if not file:
		file = curfile # (Is this necessary?)
	if not node:
		node = 'Top'
	return file, node


# Extract node name, links, menu and footnotes from the node text.
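# Return a tuple (node, (prev, next, up), menu, footnotes), where menu
# and footnotes are lists of (topic, ref) pairs.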
#
def analyze_node(text):
	#
	# Get node name and links from the header line
	#
	match = findfirstline(text)
	if match:
		(a, b) = match[0]
		line = text[a:b]
	else:
		line = ''
	node = get_it(text, findnode)
	prev = get_it(text, findprev)
	next = get_it(text, findnext)
	up = get_it(text, findup)
	#
	# Get the menu items, if there is a menu
	#
	menu = []
	match = findmenu(text)
	if match:
		(a, b) = match[0]
		while 1:
			match = findmenuitem(text, b)
			if not match:
				break
			(a, b), (a1, b1), (a2, b2) = match
			topic, ref = text[a1:b1], text[a2:b2]
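			# A menu entry written as "* Topic::" means the node
			# name equals the topic (the second group matches ':').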
			if ref == ':':
				ref = topic
			menu.append((topic, ref))
	#
	# Get the footnotes
	#
	footnotes = []
	b = 0
	while 1:
		match = findfootnote(text, b)
		if not match:
			break
		(a, b), (a1, b1), (a2, b2) = match
		topic, ref = text[a1:b1], text[a2:b2]
		if ref == ':':
			ref = topic
		footnotes.append((topic, ref))
	#
	return node, (prev, next, up), menu, footnotes
#
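# Helper: return the text matched by the first group of matcher,
# or '' if there is no match.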
def get_it(line, matcher):
	match = matcher(line)
	if not match:
		return ''
	else:
		(a, b), (a1, b1) = match
		return line[a1:b1]


# Find a node in an open file.
# The offset (from the tags table) is a hint about the node's position.
# Pass zero if there is no tags table.
# Raise NoSuchNode if the node isn't found.
# NB: This seeks around in the file.
#
def find_node(f, offset, node):
	node = string.lower(node) # Just to be sure
	#
	# Position a little before the given offset,
	# so we may find the node even if it has moved around
	# in the file a little.
	#
	offset = max(0, ((offset-FUZZ) / BLOCKSIZE) * BLOCKSIZE)
	f.seek(offset)
	#
	# Loop, hunting for a matching node header.
	#
	while 1:
		buf = f.read(CHUNKSIZE)
		if not buf:
			break
		i = 0
		while 1:
			match = findheader(buf, i)
			if match:
				(a,b), (a1,b1) = match
				start = a1
				line = buf[a1:b1]
				i = b
				match = parseheader(line)
				if match:
					(a,b), (a1,b1) = match
					key = string.lower(line[a1:b1])
					if key == node:
						# Got it!  Now read the rest.
						return read_node(f, buf[start:])
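			# A node separator was seen but no complete header
			# line yet; it may straddle the chunk boundary, so
			# read more data and try again.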
			elif findescape(buf, i):
				next = f.read(CHUNKSIZE)
				if not next:
					break
				buf = buf + next
			else:
				break
	#
	# If we get here, we didn't find it.  Too bad.
	#
	raise NoSuchNode, node


# Finish off getting a node (subroutine for find_node()).
# The node begins at the start of buf and may end in buf;
# if it doesn't end there, read additional data from f.
#
def read_node(f, buf):
	i = 0
	match = findescape(buf, i)
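	# Keep reading until the next node separator (\037) is found; the
	# else clause below runs only when the loop ends because the
	# separator was matched, i.e. without taking the break.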
	while not match:
		next = f.read(CHUNKSIZE)
		if not next:
			end = len(buf)
			break
		i = len(buf)
		buf = buf + next
		match = findescape(buf, i)
	else:
		# Got a match
		(a, b) = match[0]
		end = a
	# Strip trailing newlines
	while end > 0 and buf[end-1] == '\n':
		end = end-1
	buf = buf[:end]
	return buf


# Read reverse starting at offset until the beginning of a node is found.
# Then return a buffer containing the beginning of the node,
# with f positioned just after the buffer.
# The buffer will contain at least the full header line of the node;
# the caller should finish off with read_node() if it is the right node.
# (It is also possible that the buffer extends beyond the node!)
# Return an empty string if there is no node before the given offset.
#
def backup_node(f, offset):
	start = max(0, ((offset-CHUNKSIZE) / BLOCKSIZE) * BLOCKSIZE)
	end = offset
	while start < end:
		f.seek(start)
		buf = f.read(end-start)
		i = 0
		hit = -1
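		# 'hit' remembers the start of the last node header seen in
		# buf, so the node nearest to (but before) the offset wins.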
		while 1:
			match = findheader(buf, i)
			if match:
				(a,b), (a1,b1) = match
				hit = a1
				i = b
			elif end < offset and findescape(buf, i):
				next = f.read(min(offset-end, BLOCKSIZE))
				if not next:
					break
				buf = buf + next
				end = end + len(next)
			else:
				break
		if hit >= 0:
			return buf[hit:]
		end = start
		start = max(0, end - CHUNKSIZE)
	return ''


# Make a tag table for the given file by scanning the file.
# The file must be open for reading, and positioned at the beginning
# (or wherever the hunt for tags must begin; it is read till the end).
#
def make_tags(f):
	tags = {}
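	# Each value stored below is the tuple ('', start, line): the byte
	# offset of the node header and the header line itself, preceded by
	# an empty string (presumably a placeholder for an indirect subfile
	# name, as used by icache).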
	while 1:
		offset = f.tell()
		buf = f.read(CHUNKSIZE)
		if not buf:
			break
		i = 0
		while 1:
			match = findheader(buf, i)
			if match:
				(a,b), (a1,b1) = match
				start = offset+a1
				line = buf[a1:b1]
				i = b
				match = parseheader(line)
				if match:
					(a,b), (a1,b1) = match
					key = string.lower(line[a1:b1])
					if tags.has_key(key):
						print 'Duplicate node:', key
					tags[key] = ('', start, line)
			elif findescape(buf, i):
				next = f.read(CHUNKSIZE)
				if not next:
					break
				buf = buf + next
			else:
				break
	return tags


# Try to open a file; return a file object if it succeeds.
# Raise NoSuchFile if the file can't be opened.
# Should treat absolute pathnames specially.
#
def try_open(file):
	for dir in INFOPATH:
		try:
			return open(dir + file, 'r')
		except RuntimeError:
			pass
	raise NoSuchFile, file


# A little test for the speed of make_tags().
#
TESTFILE = 'texinfo-1'
def test_make_tags():
	import time
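	# time.millitimer() below returned milliseconds in the Python of
	# this era; on a modern Python one would presumably use time.time()
	# instead.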
	f = try_open(TESTFILE)
	t1 = time.millitimer()
	tags = make_tags(f)
	t2 = time.millitimer()
	print 'Making tag table for', `TESTFILE`, 'took', t2-t1, 'msec.'