###
# Copyright (c) 2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# Copyright (c) 2010-2021, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions, and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions, and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#   * Neither the name of the author of this software nor the name of
#     contributors to this software may be used to endorse or promote products
#     derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###

import re
import sys
import string
import socket

import supybot.conf as conf
import supybot.utils as utils
from supybot.commands import *
import supybot.utils.minisix as minisix
import supybot.plugins as plugins
import supybot.commands as commands
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Web')

if minisix.PY3:
    from html.parser import HTMLParser
    from html.entities import entitydefs
    import http.client as http_client
else:
    from HTMLParser import HTMLParser
    from htmlentitydefs import entitydefs
    import httplib as http_client

class Title(utils.web.HtmlToText):
    entitydefs = entitydefs.copy()
    entitydefs['nbsp'] = ' '

    def __init__(self):
        self.inTitle = False
        self.inSvg = False
        utils.web.HtmlToText.__init__(self)

    @property
    def inHtmlTitle(self):
        return self.inTitle and not self.inSvg

    def handle_starttag(self, tag, attrs):
        if tag == 'title':
            self.inTitle = True
        elif tag == 'svg':
            self.inSvg = True

    def handle_endtag(self, tag):
        if tag == 'title':
            self.inTitle = False
        elif tag == 'svg':
            self.inSvg = False

    def append(self, data):
        if self.inHtmlTitle:
            super(Title, self).append(data)
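
# A minimal usage sketch of the Title parser (illustrative only; it only
# collects text from the document's <title>, ignoring <title> inside SVG):
#   parser = Title()
#   parser.feed('<html><head><title>Example</title></head></html>')
#   parser.close()
#   ''.join(parser.data)  # -> 'Example'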

class DelayedIrc:
    def __init__(self, irc):
        self._irc = irc
        self._replies = []
    def reply(self, *args, **kwargs):
        self._replies.append(('reply', args, kwargs))
    def error(self, *args, **kwargs):
        self._replies.append(('error', args, kwargs))
    def __getattr__(self, name):
        assert name not in ('reply', 'error', '_irc', '_msg', '_replies')
        return getattr(self._irc, name)
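
# DelayedIrc stands in for the real irc object inside the sandboxed child
# process: reply()/error() calls are recorded as (method, args, kwargs)
# tuples, and fetch_sandbox (below) replays them on the real irc object
# once the child returns.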

if hasattr(http_client, '_MAXHEADERS'):
    def fetch_sandbox(f):
        """Runs a command in a forked process with limited memory, to
        prevent memory bombs caused by specially crafted HTTP responses.

        On CPython versions that support limiting the number of headers,
        this is the identity function."""
        return f
else:
    # For the following CPython versions (as well as the matching Pypy
    # versions):
    # * 2.6 before 2.6.9
    # * 2.7 before 2.7.9
    # * 3.2 before 3.2.6
    # * 3.3 before 3.3.3
    def fetch_sandbox(f):
        """Runs a command in a forked process with limited memory, to
        prevent memory bombs caused by specially crafted HTTP responses."""
        def process(self, irc, msg, *args, **kwargs):
            delayed_irc = DelayedIrc(irc)
            f(self, delayed_irc, msg, *args, **kwargs)
            return delayed_irc._replies
        def newf(self, irc, *args):
            try:
                replies = commands.process(process, self, irc, *args,
                        timeout=10, heap_size=10*1024*1024,
                        pn=self.name(), cn=f.__name__)
            except (commands.ProcessTimeoutError, MemoryError):
                raise utils.web.Error(_('Page is too big or the server took '
                        'too much time to answer the request.'))
            else:
                for (method, args, kwargs) in replies:
                    getattr(irc, method)(*args, **kwargs)
        newf.__doc__ = f.__doc__
        return newf
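
# On interpreters where http.client caps the header count (_MAXHEADERS),
# @fetch_sandbox is a no-op; elsewhere it runs the decorated command in a
# forked process limited to 10 seconds and 10 MiB of heap.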

def catch_web_errors(f):
    """Display a nice error instead of "An error has occurred"."""
    def newf(self, irc, *args, **kwargs):
        try:
            f(self, irc, *args, **kwargs)
        except utils.web.Error as e:
            irc.reply(str(e))
    return utils.python.changeFunctionName(newf, f.__name__, f.__doc__)

class Web(callbacks.PluginRegexp):
    """This plugin provides commands to fetch information about URLs:
    HTML titles, HTTP headers, redirect targets, sizes, and doctypes,
    plus a configurable channel title snarfer."""
    regexps = ['titleSnarfer']
    threaded = True

    def noIgnore(self, irc, msg):
        return not self.registryValue('checkIgnored', msg.channel,
                                      irc.network)
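
    # When supybot.plugins.Web.checkIgnored is disabled for a
    # channel/network, noIgnore returns True and the snarfer also
    # processes messages from ignored users.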

    def getTitle(self, irc, url, raiseErrors, msg):
        size = conf.supybot.protocols.http.peekSize()

        parsed_url = utils.web.urlparse(url)
        if parsed_url.netloc.endswith(('youtube.com', '.youtube.com')):
            # there is a lot of Javascript before the <title>
            size = 409600
        if parsed_url.netloc in ('reddit.com', 'www.reddit.com',
                                 'new.reddit.com'):
            # Since 2022-03, New Reddit has 'Reddit - Dive into anything' as
            # <title> on every page.
            parsed_url = parsed_url._replace(netloc='old.reddit.com')
            url = utils.web.urlunparse(parsed_url)

        timeout = self.registryValue('timeout')
        headers = conf.defaultHttpHeaders(irc.network, msg.channel)
        try:
            (target, text) = utils.web.getUrlTargetAndContent(url, size=size,
                    timeout=timeout, headers=headers)
        except Exception as e:
            if raiseErrors:
                irc.error(_('That URL raised <%s>') % e, Raise=True)
            else:
                self.log.info('Web plugin TitleSnarfer: URL <%s> raised <%s>',
                              url, str(e))
                return
        try:
            text = text.decode(utils.web.getEncoding(text) or 'utf8',
                               'replace')
        except UnicodeDecodeError:
            # On Python 2, fall through; the feed() workaround below
            # handles it.
            if minisix.PY3:
                if raiseErrors:
                    irc.error(_('Could not guess the page\'s encoding. (Try '
                                'installing python-charade.)'), Raise=True)
                else:
                    self.log.info('Web plugin TitleSnarfer: URL <%s> Could '
                                  'not guess the page\'s encoding. (Try '
                                  'installing python-charade.)', url)
                    return
        try:
            parser = Title()
            parser.feed(text)
        except UnicodeDecodeError:
            # Workaround for Python 2
            # https://github.com/progval/Limnoria/issues/1359
            parser = Title()
            parser.feed(text.encode('utf8'))
        parser.close()
        title = utils.str.normalizeWhitespace(''.join(parser.data).strip())
        if title:
            return (target, title)
        elif raiseErrors:
            if len(text) < size:
                irc.error(_('That URL appears to have no HTML title.'),
                          Raise=True)
            else:
                irc.error(format(_('That URL appears to have no HTML title '
                                   'within the first %S.'), size), Raise=True)
        else:
            if len(text) < size:
                self.log.debug('Web plugin TitleSnarfer: URL <%s> appears '
                               'to have no HTML title.', url)
            else:
                self.log.debug('Web plugin TitleSnarfer: URL <%s> appears '
                               'to have no HTML title within the first %S.',
                               url, size)
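
    # getTitle returns a (target_url, title) pair on success and None when
    # the fetch fails or the page has no usable <title>; titleSnarfer and
    # the title command below unpack it accordingly.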

    @fetch_sandbox
    def titleSnarfer(self, irc, msg, match):
        channel = msg.channel
        network = irc.network
        if not channel:
            return
        if callbacks.addressed(irc, msg):
            return
        if self.registryValue('titleSnarfer', channel, network):
            url = match.group(0)
            if not self._checkURLWhitelist(url):
                return
            r = self.registryValue('nonSnarfingRegexp', channel, network)
            if r and r.search(url):
                self.log.debug('Not titleSnarfing %q.', url)
                return
            r = self.getTitle(irc, url, False, msg)
            if not r:
                return
            (target, title) = r
            if title:
                domain = utils.web.getDomain(target
                        if self.registryValue('snarferShowTargetDomain',
                                              channel, network)
                        else url)
                prefix = self.registryValue('snarferPrefix', channel, network)
                if prefix:
                    s = "%s %s" % (prefix, title)
                else:
                    s = title
                if self.registryValue('snarferShowDomain', channel, network):
                    s += format(_(' (at %s)'), domain)
                irc.reply(s, prefixNick=False)
            if self.registryValue('snarfMultipleUrls', channel, network):
                # FIXME: hack
                msg.tag('repliedTo', False)
    titleSnarfer = urlSnarfer(titleSnarfer)
    titleSnarfer.__doc__ = utils.web._httpUrlRe

    def _checkURLWhitelist(self, url):
        if not self.registryValue('urlWhitelist'):
            return True
        passed = False
        for wu in self.registryValue('urlWhitelist'):
            if wu.endswith('/') and url.find(wu) == 0:
                passed = True
                break
            if (not wu.endswith('/')) \
                    and (url.find(wu + '/') == 0 or url == wu):
                passed = True
                break
        return passed
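
    # Matching sketch (assuming urlWhitelist is ['http://example.com'],
    # a hypothetical value):
    #   'http://example.com'       -> allowed (exact match)
    #   'http://example.com/page'  -> allowed (prefix ending at a '/')
    #   'http://example.com.evil'  -> rejected (no '/' boundary after match)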

    @wrap(['httpUrl'])
    @catch_web_errors
    @fetch_sandbox
    def headers(self, irc, msg, args, url):
        """<url>

        Returns the HTTP headers of <url>.  Only HTTP URLs are valid, of
        course.
        """
        if not self._checkURLWhitelist(url):
            irc.error("This URL is not on the whitelist.")
            return
        timeout = self.registryValue('timeout')
        fd = utils.web.getUrlFd(url, timeout=timeout)
        try:
            s = ', '.join([format(_('%s: %s'), k, v)
                           for (k, v) in fd.headers.items()])
            irc.reply(s)
        finally:
            fd.close()

    @wrap(['httpUrl'])
    @catch_web_errors
    @fetch_sandbox
    def location(self, irc, msg, args, url):
        """<url>

        If the <url> is redirected to another page, returns the URL of that
        page.  This works even if there are multiple redirects.  Only HTTP
        URLs are valid.  Useful to "un-tinify" URLs."""
        timeout = self.registryValue('timeout')
        (target, text) = utils.web.getUrlTargetAndContent(url, size=60,
                                                          timeout=timeout)
        irc.reply(target)

    _doctypeRe = re.compile(r'(<!DOCTYPE[^>]+>)', re.M)
    # Matches declarations like '<!DOCTYPE html>' as well as the longer
    # HTML 4.01 forms.
    @wrap(['httpUrl'])
    @catch_web_errors
    @fetch_sandbox
    def doctype(self, irc, msg, args, url):
        """<url>

        Returns the DOCTYPE string of <url>.  Only HTTP URLs are valid, of
        course.
        """
        if not self._checkURLWhitelist(url):
            irc.error("This URL is not on the whitelist.")
            return
        size = conf.supybot.protocols.http.peekSize()
        timeout = self.registryValue('timeout')
        s = utils.web.getUrl(url, size=size, timeout=timeout).decode('utf8')
        m = self._doctypeRe.search(s)
        if m:
            s = utils.str.normalizeWhitespace(m.group(0))
            irc.reply(s)
        else:
            irc.reply(_('That URL has no specified doctype.'))

    @wrap(['httpUrl'])
    @catch_web_errors
    @fetch_sandbox
    def size(self, irc, msg, args, url):
        """<url>

        Returns the Content-Length header of <url>.  Only HTTP URLs are
        valid, of course.
        """
        if not self._checkURLWhitelist(url):
            irc.error("This URL is not on the whitelist.")
            return
        timeout = self.registryValue('timeout')
        fd = utils.web.getUrlFd(url, timeout=timeout)
        try:
            try:
                size = fd.headers['Content-Length']
                if size is None:
                    raise KeyError('content-length')
                irc.reply(format(_('%u is %S long.'), url, int(size)))
            except KeyError:
                # The server didn't send Content-Length; read up to peekSize
                # bytes to get at least a lower bound on the size.
                size = conf.supybot.protocols.http.peekSize()
                s = fd.read(size)
                if len(s) != size:
                    irc.reply(format(_('%u is %S long.'), url, len(s)))
                else:
                    irc.reply(format(_('The server didn\'t tell me how long '
                                       '%u is but it\'s longer than %S.'),
                                     url, size))
        finally:
            fd.close()

    @wrap([getopts({'no-filter': ''}), 'httpUrl'])
    @catch_web_errors
    @fetch_sandbox
    def title(self, irc, msg, args, optlist, url):
        """[--no-filter] <url>

        Returns the HTML <title>...</title> of a URL.
        If --no-filter is given, the bot won't strip special chars (action,
        DCC, ...).
        """
        if not self._checkURLWhitelist(url):
            irc.error("This URL is not on the whitelist.")
            return
        r = self.getTitle(irc, url, True, msg)
        if not r:
            return
        (target, title) = r
        if title:
            if not [y for (x, y) in optlist if x == 'no-filter']:
                # Strip \x01 (CTCP/ACTION), \x02 (bold), and \x03 (color)
                # control codes so the title can't trigger them.
                for i in range(1, 4):
                    title = title.replace(chr(i), '')
            irc.reply(title)

    @wrap(['text'])
    def urlquote(self, irc, msg, args, text):
        """<text>

        Returns the URL quoted form of the text.
        """
        irc.reply(utils.web.urlquote(text))

    @wrap(['text'])
    def urlunquote(self, irc, msg, args, text):
        """<text>

        Returns the text un-URL quoted.
        """
        s = utils.web.urlunquote(text)
        irc.reply(s)
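
    # Round-trip sketch for the two helpers above (illustrative,
    # assuming the utils.web wrappers behave like urllib's quote/unquote):
    #   utils.web.urlquote('foo bar')     -> 'foo%20bar'
    #   utils.web.urlunquote('foo%20bar') -> 'foo bar'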

    @wrap(['url'])
    @catch_web_errors
    @fetch_sandbox
    def fetch(self, irc, msg, args, url):
        """<url>

        Returns the contents of <url>, or as much as is configured in
        supybot.plugins.Web.fetch.maximum.  If that configuration variable is
        set to 0, this command will be effectively disabled.
        """
        if not self._checkURLWhitelist(url):
            irc.error("This URL is not on the whitelist.")
            return
        max = self.registryValue('fetch.maximum')
        timeout = self.registryValue('fetch.timeout')
        if not max:
            irc.error(_('This command is disabled '
                        '(supybot.plugins.Web.fetch.maximum is set to 0).'),
                      Raise=True)
        fd = utils.web.getUrl(url, size=max, timeout=timeout).decode('utf8')
        irc.reply(fd)

Class = Web

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: