diff --git a/plugins/Http.py b/plugins/Http.py
index f44f329ae..21c3c3577 100644
--- a/plugins/Http.py
+++ b/plugins/Http.py
@@ -50,6 +50,7 @@ import conf
import utils
import webutils
import privmsgs
+import registry
import callbacks
class FreshmeatException(Exception):
@@ -57,7 +58,6 @@ class FreshmeatException(Exception):
class Http(callbacks.Privmsg):
threaded = True
- maxSize = 4096
    _titleRe = re.compile(r'<title>(.*?)</title>', re.I | re.S)
def callCommand(self, method, irc, msg, *L):
try:
@@ -90,7 +90,7 @@ class Http(callbacks.Privmsg):
if not url.startswith('http://'):
irc.error('Only HTTP urls are valid.')
return
- s = webutils.getUrl(url, size=self.maxSize)
+ s = webutils.getUrl(url, size=conf.supybot.httpPeekSize())
m = self._doctypeRe.search(s)
if m:
s = utils.normalizeWhitespace(m.group(0))
@@ -113,13 +113,13 @@ class Http(callbacks.Privmsg):
size = fd.headers['Content-Length']
irc.reply('%s is %s bytes long.' % (url, size))
except KeyError:
- s = fd.read(self.maxSize)
- if len(s) != self.maxSize:
+ s = fd.read(conf.supybot.httpPeekSize())
+ if len(s) != conf.supybot.httpPeekSize():
irc.reply('%s is %s bytes long.' % (url, len(s)))
else:
irc.reply('The server didn\'t tell me how long %s is '
- 'but it\'s longer than %s bytes.' %
- (url,self.maxSize))
+ 'but it\'s longer than %s bytes.' %
+ (url,conf.supybot.httpPeekSize()))
def title(self, irc, msg, args):
"""
@@ -129,13 +129,13 @@ class Http(callbacks.Privmsg):
url = privmsgs.getArgs(args)
if '://' not in url:
url = 'http://%s' % url
- text = webutils.getUrl(url, size=self.maxSize)
+ text = webutils.getUrl(url, size=conf.supybot.httpPeekSize())
m = self._titleRe.search(text)
if m is not None:
irc.reply(utils.htmlToText(m.group(1).strip()))
else:
irc.reply('That URL appears to have no HTML title '
- 'within the first %s bytes.' % self.maxSize)
+ 'within the first %s bytes.'%conf.supybot.httpPeekSize())
def freshmeat(self, irc, msg, args):
"""
diff --git a/plugins/URL.py b/plugins/URL.py
index dd2fc9592..5dbd95e6c 100644
--- a/plugins/URL.py
+++ b/plugins/URL.py
@@ -102,7 +102,6 @@ class URL(callbacks.PrivmsgCommandAndRegexp,
plugins.ChannelDBHandler):
regexps = ['tinyurlSnarfer', 'titleSnarfer']
    _titleRe = re.compile('<title>(.*?)</title>', re.I)
- maxSize = 4096
def __init__(self):
self.nextMsgs = {}
callbacks.PrivmsgCommandAndRegexp.__init__(self)
@@ -200,7 +199,7 @@ class URL(callbacks.PrivmsgCommandAndRegexp,
channel = msg.args[0]
if self.registryValue('titleSnarfer', channel):
url = match.group(0)
- text = webutils.getUrl(url, size=self.maxSize)
+ text = webutils.getUrl(url, size=conf.supybot.httpPeekSize())
m = self._titleRe.search(text)
if m is not None:
s = 'Title: %s' % utils.htmlToText(m.group(1).strip())
diff --git a/src/conf.py b/src/conf.py
index b71f3716a..af82d39d4 100644
--- a/src/conf.py
+++ b/src/conf.py
@@ -168,6 +168,11 @@ whether the bot will automatically thread all commands. At this point this
option exists almost exclusively for debugging purposes; it can do very little
except to take up more CPU."""))
+supybot.register('httpPeekSize', registry.PositiveInteger(4096, """Determines
+how many bytes the bot will 'peek' at when looking through a URL for a
+doctype or title or something similar. It'll give up after it reads this many
+bytes."""))
+
###
# Reply/error tweaking.
###