path: root/Tools/scripts/logmerge.py
#! /usr/bin/env python

"""Consolidate a bunch of CVS or RCS logs read from stdin.

Input should be the output of a CVS or RCS logging command, e.g.

    cvs log -rrelease14

which dumps all log messages from release1.4 upwards (assuming that
release 1.4 was tagged with tag 'release14').

This collects all the revision records and outputs them sorted by date
rather than by file, collapsing duplicate revision records, i.e.,
records with the same message for different files.

The -t option causes it to truncate (discard) the last revision log
entry of each file; this is useful when using something like the above
cvs log command, whose output includes the revision carrying the given
tag, while you probably want everything *since* that tag.

XXX This code was created by reverse engineering CVS 1.9 and RCS 5.7.

"""

import sys, getopt, string

sep1 = '='*77 + '\n'			# file separator
sep2 = '-'*28 + '\n'			# revision separator
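
# A rough sketch of the input this script parses, reconstructed from the
# parsing code below; real `cvs log` output has more header fields, which
# are ignored, and the file name, author and message here are invented.
# The separator lines must match sep1/sep2 exactly:
#
#   RCS file: /cvsroot/foo.py,v
#   Working file: foo.py
#   ----------------------------                  (sep2: 28 dashes)
#   revision 1.2
#   date: 1996/10/01 12:34:56;  author: guido;  state: Exp;
#   Fixed a typo.
#   ======= ... =======                           (sep1: 77 equal signs)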

def main():
    """Main program"""
    truncate_last = 0
    opts, args = getopt.getopt(sys.argv[1:], "t")
    for o, a in opts:
	if o == '-t':
	    truncate_last = 1
    database = []
    while 1:
	chunk = read_chunk(sys.stdin)
	if not chunk:
	    break
	records = digest_chunk(chunk)
	if truncate_last:
	    del records[-1]
	database[len(database):] = records
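    # Records are (date, working_file, rev, text) tuples, so sorting
    # compares dates first; reverse() puts the most recent entries first.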
    database.sort()
    database.reverse()
    format_output(database)

def read_chunk(fp):
    """Read a chunk -- data for one file, ending with sep1.

    Split the chunk into parts separated by sep2.

    """
    chunk = []
    lines = []
    while 1:
	line = fp.readline()
	if not line:
	    break
	if line == sep1:
	    if lines:
		chunk.append(lines)
	    break
	if line == sep2:
	    if lines:
		chunk.append(lines)
		lines = []
	else:
	    lines.append(line)
    return chunk
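
# For input like the sketch above, read_chunk() returns one list of lines
# per part, the first part being the per-file header; roughly:
#
#   [['RCS file: /cvsroot/foo.py,v\n', 'Working file: foo.py\n'],
#    ['revision 1.2\n',
#     'date: 1996/10/01 12:34:56;  author: guido;  state: Exp;\n',
#     'Fixed a typo.\n']]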

def digest_chunk(chunk):
    """Digest a chunk -- extract working file name and revisions"""
    lines = chunk[0]
    key = 'Working file:'
    keylen = len(key)
    for line in lines:
	if line[:keylen] == key:
	    working_file = string.strip(line[keylen:])
	    break
    else:
	working_file = None
    records = []
    for lines in chunk[1:]:
	revline = lines[0]
	dateline = lines[1]
	text = lines[2:]
	words = string.split(dateline)
	if len(words) >= 3 and words[0] == 'date:':
	    dateword = words[1]
	    timeword = words[2]
	    if timeword[-1:] == ';':
		timeword = timeword[:-1]
	    date = dateword + ' ' + timeword
	else:
	    date = None
	    text.insert(0, revline)
	words = string.split(revline)
	if len(words) >= 2 and words[0] == 'revision':
	    rev = words[1]
	else:
	    rev = None
	    text.insert(0, revline)
	records.append((date, working_file, rev, text))
    return records
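
# Each record is a (date, working_file, rev, text) tuple, e.g.
# ('1996/10/01 12:34:56', 'foo.py', '1.2', ['Fixed a typo.\n']);
# the YYYY/MM/DD HH:MM:SS date strings sort chronologically, which is
# what the plain sort() in main() relies on.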
	
def format_output(database):
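    """Print the database, grouping records that share the same log text."""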
    prevtext = None
    prev = []
    database.append((None, None, None, None)) # Sentinel
    for (date, working_file, rev, text) in database:
	if text != prevtext:
	    if prev:
		print sep2,
		# Use fresh names here so the current record's
		# date/working_file/rev aren't clobbered before the append below.
		for (p_date, p_working_file, p_rev) in prev:
		    print p_date, p_working_file
		sys.stdout.writelines(prevtext)
	    prev = []
	prev.append((date, working_file, rev))
	prevtext = text
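
# Typical invocation, per the module docstring:
#
#     cvs log -rrelease14 | python logmerge.py -t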

main()