path: root/Lib/robotparser.py
author    Raymond Hettinger <python@rcn.com>  2014-05-13 05:18:50 (GMT)
committer Raymond Hettinger <python@rcn.com>  2014-05-13 05:18:50 (GMT)
commit    a5413c499702a74fdc50e4bc8e7e6a480856a1f9 (patch)
tree      079266511a220614fb6b33699dc27e5102695ae1 /Lib/robotparser.py
parent    c5945966aee2fb3ddd96d7521b245cdb9968afcb (diff)
Issue 21469: Mitigate risk of false positives with robotparser.
* Repair the broken link to norobots-rfc.txt.
* HTTP response codes >= 500 are treated as a failed read rather than as "not found".  Not found means that we can assume the entire site is allowed; a 5xx server error tells us nothing.
* A successful read() or parse() updates the mtime (which is defined to be "the time the robots.txt file was last fetched").
* The can_fetch() method returns False unless we've had a read() with a 2xx or 4xx response.  This avoids false positives in the case where a user calls can_fetch() before calling read().
* I don't see any easy way to test this patch without hitting internet resources that might change or without using mock objects that wouldn't provide much reassurance.
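Illustration (not part of the commit): a minimal Python 2 sketch of the intended behaviour after this patch; the host and user-agent string are made up for the example.

import robotparser

rp = robotparser.RobotFileParser("http://example.com/robots.txt")

# Before read() has fetched anything, can_fetch() now errs on the side
# of caution and returns False instead of a false positive.
print rp.can_fetch("MyCrawler", "http://example.com/private.html")   # False

rp.read()           # a 2xx or 4xx response unblocks can_fetch(); a 5xx does not
print rp.mtime()    # updated by a successful read() or parse()
print rp.can_fetch("MyCrawler", "http://example.com/private.html")   # now reflects the fetched rules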
Diffstat (limited to 'Lib/robotparser.py')
-rw-r--r--  Lib/robotparser.py  14
1 file changed, 12 insertions, 2 deletions
diff --git a/Lib/robotparser.py b/Lib/robotparser.py
index ad3be94..b46b753 100644
--- a/Lib/robotparser.py
+++ b/Lib/robotparser.py
@@ -7,7 +7,8 @@
     2) PSF license for Python 2.2
 
     The robots.txt Exclusion Protocol is implemented as specified in
-    http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
+    http://www.robotstxt.org/norobots-rfc.txt
+
 """
 import urlparse
 import urllib
@@ -60,7 +61,7 @@ class RobotFileParser:
         self.errcode = opener.errcode
         if self.errcode in (401, 403):
             self.disallow_all = True
-        elif self.errcode >= 400:
+        elif self.errcode >= 400 and self.errcode < 500:
             self.allow_all = True
         elif self.errcode == 200 and lines:
             self.parse(lines)
@@ -86,6 +87,7 @@ class RobotFileParser:
         linenumber = 0
         entry = Entry()
 
+        self.modified()
         for line in lines:
             linenumber += 1
             if not line:
@@ -131,6 +133,14 @@ class RobotFileParser:
             return False
         if self.allow_all:
             return True
+
+        # Until the robots.txt file has been read or found not
+        # to exist, we must assume that no url is allowable.
+        # This prevents false positives when a user erroneously
+        # calls can_fetch() before calling read().
+        if not self.last_checked:
+            return False
+
         # search for given user agent matches
         # the first match counts
         parsed_url = urlparse.urlparse(urllib.unquote(url))
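Illustration (not part of the commit): a rough sketch of how the revised error-code branches in read() map onto can_fetch() results.  The state_after_read() helper below is hypothetical and is handed the error code directly; the real read() obtains it from URLopener over the network.

import robotparser

def state_after_read(errcode, lines=None):
    # Hypothetical helper mirroring the revised read() branches.
    rp = robotparser.RobotFileParser()
    if errcode in (401, 403):
        rp.disallow_all = True                 # access restricted: nothing may be fetched
    elif errcode >= 400 and errcode < 500:
        rp.allow_all = True                    # robots.txt absent: everything is allowed
    elif errcode == 200 and lines:
        rp.parse(lines)                        # parse() now also records the fetch time
    # A 5xx response sets neither flag, so last_checked stays 0 and
    # can_fetch() keeps returning False until a later read() succeeds.
    return rp

print state_after_read(404).can_fetch("*", "http://example.com/x")    # True  (not found)
print state_after_read(503).can_fetch("*", "http://example.com/x")    # False (server error)
print state_after_read(200, ["User-agent: *", "Disallow: /x"]).can_fetch("*", "http://example.com/x")  # False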