2005-02-01 13:09:12 +01:00
|
|
|
###
|
|
|
|
# Copyright (c) 2002-2004, Jeremiah Fincher
|
2012-09-01 16:16:48 +02:00
|
|
|
# Copyright (c) 2008-2010, James McCoy
|
2005-02-01 13:09:12 +01:00
|
|
|
# All rights reserved.
|
|
|
|
#
|
|
|
|
# Redistribution and use in source and binary forms, with or without
|
|
|
|
# modification, are permitted provided that the following conditions are met:
|
|
|
|
#
|
|
|
|
# * Redistributions of source code must retain the above copyright notice,
|
|
|
|
# this list of conditions, and the following disclaimer.
|
|
|
|
# * Redistributions in binary form must reproduce the above copyright notice,
|
|
|
|
# this list of conditions, and the following disclaimer in the
|
|
|
|
# documentation and/or other materials provided with the distribution.
|
|
|
|
# * Neither the name of the author of this software nor the name of
|
|
|
|
# contributors to this software may be used to endorse or promote products
|
|
|
|
# derived from this software without specific prior written consent.
|
|
|
|
#
|
|
|
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
|
|
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
# POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
###
|
|
|
|
|
|
|
|
import re
|
2013-01-06 16:46:21 +01:00
|
|
|
import sys
|
2012-09-11 02:07:43 +02:00
|
|
|
import json
|
2005-02-01 13:09:12 +01:00
|
|
|
|
|
|
|
import supybot.conf as conf
|
|
|
|
import supybot.utils as utils
|
|
|
|
import supybot.world as world
|
|
|
|
from supybot.commands import *
|
2015-08-11 16:50:23 +02:00
|
|
|
import supybot.utils.minisix as minisix
|
2005-02-01 13:09:12 +01:00
|
|
|
import supybot.ircmsgs as ircmsgs
|
|
|
|
import supybot.ircutils as ircutils
|
|
|
|
import supybot.callbacks as callbacks
|
2010-10-17 14:50:31 +02:00
|
|
|
from supybot.i18n import PluginInternationalization, internationalizeDocstring
|
|
|
|
_ = PluginInternationalization('Google')
|
2005-02-01 13:09:12 +01:00
|
|
|
|
2005-02-09 08:04:04 +01:00
|
|
|
class Google(callbacks.PluginRegexp):
    """This is a simple plugin to provide access to the Google services we
    all know and love from our favorite IRC bot."""
    # Every command hits the network, so run each in its own thread.
    threaded = True
    # Answer before the generic Web plugin gets a chance to.
    callBefore = ['Web']
    # Regexp-triggered handlers (see googleSnarfer below).
    regexps = ['googleSnarfer']

    # Lazily-filled cache: plain word -> its mIRC-colored rendering.
    _colorGoogles = {}
|
|
|
|
def _getColorGoogle(self, m):
|
|
|
|
s = m.group(1)
|
|
|
|
ret = self._colorGoogles.get(s)
|
|
|
|
if not ret:
|
|
|
|
L = list(s)
|
|
|
|
L[0] = ircutils.mircColor(L[0], 'blue')[:-1]
|
|
|
|
L[1] = ircutils.mircColor(L[1], 'red')[:-1]
|
|
|
|
L[2] = ircutils.mircColor(L[2], 'yellow')[:-1]
|
|
|
|
L[3] = ircutils.mircColor(L[3], 'blue')[:-1]
|
|
|
|
L[4] = ircutils.mircColor(L[4], 'green')[:-1]
|
|
|
|
L[5] = ircutils.mircColor(L[5], 'red')
|
|
|
|
ret = ''.join(L)
|
|
|
|
self._colorGoogles[s] = ret
|
|
|
|
return ircutils.bold(ret)
|
|
|
|
|
|
|
|
_googleRe = re.compile(r'\b(google)\b', re.I)
|
|
|
|
def outFilter(self, irc, msg):
|
|
|
|
if msg.command == 'PRIVMSG' and \
|
2019-08-24 17:50:05 +02:00
|
|
|
self.registryValue('colorfulFilter', msg.channel, irc.network):
|
2005-02-01 13:09:12 +01:00
|
|
|
s = msg.args[1]
|
|
|
|
s = re.sub(self._googleRe, self._getColorGoogle, s)
|
|
|
|
msg = ircmsgs.privmsg(msg.args[0], s, msg=msg)
|
|
|
|
return msg
|
|
|
|
|
2020-05-15 19:56:36 +02:00
|
|
|
_decode_re = re.compile(r'<div class="\w+"><a href="/url\?q=(?P<url>[^"]+)&[^"]+"[^>]*><div class="(\w| )+">(?P<title>.*?)</div><div class="(\w| )+">(?P<breadcrumbs>.*?)</div></a></div>(?P<content><div class="(\w| )+">.*?</div></div>)', re.DOTALL | re.MULTILINE)
|
2016-05-04 19:05:03 +02:00
|
|
|
@classmethod
|
|
|
|
def decode(cls, text):
|
2019-05-28 19:37:47 +02:00
|
|
|
matches = cls._decode_re.finditer(text)
|
2016-05-04 19:05:03 +02:00
|
|
|
results = []
|
|
|
|
for match in matches:
|
2019-05-28 19:37:47 +02:00
|
|
|
r = match.groupdict()
|
2016-08-10 22:52:00 +02:00
|
|
|
r['url'] = utils.web.urlunquote(utils.web.htmlToText(r['url'].split('&')[0]))
|
2016-05-04 19:05:03 +02:00
|
|
|
results.append(r)
|
|
|
|
return results
|
|
|
|
|
|
|
|
|
2016-05-05 08:31:04 +02:00
|
|
|
_gsearchUrl = 'https://www.google.com/search'
|
2019-08-24 17:50:05 +02:00
|
|
|
def search(self, query, channel, network, options={}):
|
2016-05-06 09:02:54 +02:00
|
|
|
"""search("search phrase", options={})
|
2008-08-15 01:20:06 +02:00
|
|
|
|
|
|
|
Valid options are:
|
|
|
|
smallsearch - True/False (Default: False)
|
2009-02-09 06:41:16 +01:00
|
|
|
filter - {active,moderate,off} (Default: "moderate")
|
2008-08-15 01:20:06 +02:00
|
|
|
language - Restrict search to documents in the given language
|
|
|
|
(Default: "lang_en")
|
|
|
|
"""
|
2016-05-06 09:02:54 +02:00
|
|
|
self.log.warning('The Google plugin search is deprecated since '
|
|
|
|
'Google closed their public API and will be removed in a '
|
|
|
|
'future release. Please consider switching to an other '
|
|
|
|
'plugin for your searches, like '
|
|
|
|
'<https://github.com/Hoaas/Supybot-plugins/tree/master/DuckDuckGo>, '
|
2016-05-06 16:14:25 +02:00
|
|
|
'<https://github.com/joulez/GoogleCSE>, or '
|
|
|
|
'<https://github.com/GLolol/SupyPlugins/tree/master/DDG>.')
|
2008-08-15 01:20:06 +02:00
|
|
|
ref = self.registryValue('referer')
|
|
|
|
if not ref:
|
|
|
|
ref = 'http://%s/%s' % (dynamic.irc.server,
|
|
|
|
dynamic.irc.nick)
|
2014-01-14 21:55:56 +01:00
|
|
|
headers = dict(utils.web.defaultHeaders)
|
2008-08-15 01:20:06 +02:00
|
|
|
headers['Referer'] = ref
|
2016-05-05 08:31:04 +02:00
|
|
|
opts = {'q': query, 'gbv': '2'}
|
2015-08-08 22:20:14 +02:00
|
|
|
for (k, v) in options.items():
|
2008-08-15 01:20:06 +02:00
|
|
|
if k == 'smallsearch':
|
|
|
|
if v:
|
|
|
|
opts['rsz'] = 'small'
|
|
|
|
else:
|
|
|
|
opts['rsz'] = 'large'
|
2009-02-09 06:41:16 +01:00
|
|
|
elif k == 'filter':
|
2008-08-15 01:20:06 +02:00
|
|
|
opts['safe'] = v
|
|
|
|
elif k == 'language':
|
2016-05-05 08:03:10 +02:00
|
|
|
opts['hl'] = v
|
2019-08-24 17:50:05 +02:00
|
|
|
defLang = self.registryValue('defaultLanguage', channel, network)
|
2016-05-05 08:03:10 +02:00
|
|
|
if 'hl' not in opts and defLang:
|
|
|
|
opts['hl'] = defLang.strip('lang_')
|
2008-08-15 01:20:06 +02:00
|
|
|
if 'safe' not in opts:
|
2019-08-24 17:50:05 +02:00
|
|
|
opts['safe'] = self.registryValue('searchFilter', channel, network)
|
2008-08-15 01:20:06 +02:00
|
|
|
if 'rsz' not in opts:
|
2008-10-17 23:27:45 +02:00
|
|
|
opts['rsz'] = 'large'
|
2008-08-15 01:20:06 +02:00
|
|
|
|
2012-08-04 19:50:30 +02:00
|
|
|
text = utils.web.getUrl('%s?%s' % (self._gsearchUrl,
|
2015-08-10 17:55:25 +02:00
|
|
|
utils.web.urlencode(opts)),
|
2012-08-04 19:50:30 +02:00
|
|
|
headers=headers).decode('utf8')
|
2016-05-04 19:05:03 +02:00
|
|
|
return text
|
2008-08-15 01:20:06 +02:00
|
|
|
|
2012-07-30 16:57:52 +02:00
|
|
|
def formatData(self, data, bold=True, max=0, onetoone=False):
|
2016-05-04 19:05:03 +02:00
|
|
|
data = self.decode(data)
|
2005-02-01 13:09:12 +01:00
|
|
|
results = []
|
|
|
|
if max:
|
2008-08-15 01:20:06 +02:00
|
|
|
data = data[:max]
|
|
|
|
for result in data:
|
2016-05-04 19:05:03 +02:00
|
|
|
title = utils.web.htmlToText(result['title']\
|
2008-08-15 01:20:06 +02:00
|
|
|
.encode('utf-8'))
|
2016-05-04 19:05:03 +02:00
|
|
|
url = result['url']
|
2015-08-09 00:23:03 +02:00
|
|
|
if minisix.PY2:
|
2013-01-06 16:46:21 +01:00
|
|
|
url = url.encode('utf-8')
|
2005-02-01 13:09:12 +01:00
|
|
|
if title:
|
|
|
|
if bold:
|
|
|
|
title = ircutils.bold(title)
|
|
|
|
results.append(format('%s: %u', title, url))
|
|
|
|
else:
|
|
|
|
results.append(url)
|
2015-08-09 00:23:03 +02:00
|
|
|
if minisix.PY2:
|
2013-05-13 14:18:22 +02:00
|
|
|
repl = lambda x:x if isinstance(x, unicode) else unicode(x, 'utf8')
|
2014-01-21 10:57:38 +01:00
|
|
|
results = list(map(repl, results))
|
2005-02-01 13:09:12 +01:00
|
|
|
if not results:
|
2013-05-13 14:18:22 +02:00
|
|
|
return [_('No matches found.')]
|
2012-07-30 16:57:52 +02:00
|
|
|
elif onetoone:
|
|
|
|
return results
|
2005-02-01 13:09:12 +01:00
|
|
|
else:
|
2015-08-10 19:14:22 +02:00
|
|
|
return [minisix.u('; ').join(results)]
|
2005-02-01 13:09:12 +01:00
|
|
|
|
2010-10-17 14:50:31 +02:00
|
|
|
@internationalizeDocstring
|
2010-07-14 21:56:48 +02:00
|
|
|
def lucky(self, irc, msg, args, opts, text):
|
|
|
|
"""[--snippet] <search>
|
2005-02-01 13:09:12 +01:00
|
|
|
|
|
|
|
Does a google search, but only returns the first result.
|
2010-07-14 21:56:48 +02:00
|
|
|
If option --snippet is given, returns also the page text snippet.
|
2005-02-01 13:09:12 +01:00
|
|
|
"""
|
2010-07-14 21:56:48 +02:00
|
|
|
opts = dict(opts)
|
2019-08-24 17:50:05 +02:00
|
|
|
data = self.search(text, msg.channel, irc.network,
|
|
|
|
{'smallsearch': True})
|
2016-05-05 09:01:51 +02:00
|
|
|
data = self.decode(data)
|
2016-05-04 19:05:03 +02:00
|
|
|
if data:
|
2016-05-05 09:01:51 +02:00
|
|
|
url = data[0]['url']
|
Start accelerating the 2to3 step (remove fix_apply, fix_buffer, fix_callable, fix_exec, fix_execfile, fix_exitfunc, fix_filter, fix_funcattrs, fix_future, fix_getcwdu, and fix_has_key).
2014-01-20 14:49:47 +01:00
|
|
|
if 'snippet' in opts:
|
2016-05-05 09:01:51 +02:00
|
|
|
snippet = data[0]['content']
|
2010-07-15 01:03:31 +02:00
|
|
|
snippet = " | " + utils.web.htmlToText(snippet, tagReplace='')
|
2010-07-14 21:56:48 +02:00
|
|
|
else:
|
|
|
|
snippet = ""
|
|
|
|
result = url + snippet
|
2010-07-26 15:22:07 +02:00
|
|
|
irc.reply(result)
|
2005-02-01 13:09:12 +01:00
|
|
|
else:
|
2010-10-17 14:50:31 +02:00
|
|
|
irc.reply(_('Google found nothing.'))
|
2010-07-14 21:56:48 +02:00
|
|
|
lucky = wrap(lucky, [getopts({'snippet':'',}), 'text'])
|
2005-02-01 13:09:12 +01:00
|
|
|
|
2010-10-17 14:50:31 +02:00
|
|
|
@internationalizeDocstring
|
2005-02-01 13:09:12 +01:00
|
|
|
def google(self, irc, msg, args, optlist, text):
|
2009-02-09 06:41:16 +01:00
|
|
|
"""<search> [--{filter,language} <value>]
|
2005-02-01 13:09:12 +01:00
|
|
|
|
|
|
|
Searches google.com for the given string. As many results as can fit
|
2009-02-09 06:41:16 +01:00
|
|
|
are included. --language accepts a language abbreviation; --filter
|
2008-08-15 01:20:06 +02:00
|
|
|
accepts a filtering level ('active', 'moderate', 'off').
|
2005-02-01 13:09:12 +01:00
|
|
|
"""
|
2008-08-15 01:20:06 +02:00
|
|
|
if 'language' in optlist and optlist['language'].lower() not in \
|
|
|
|
conf.supybot.plugins.Google.safesearch.validStrings:
|
|
|
|
irc.errorInvalid('language')
|
2019-08-24 17:50:05 +02:00
|
|
|
data = self.search(text, msg.channel, irc.network, dict(optlist))
|
|
|
|
bold = self.registryValue('bold', msg.channel, irc.network)
|
|
|
|
max = self.registryValue('maximumResults', msg.channel, irc.network)
|
2012-07-30 16:57:52 +02:00
|
|
|
# We don't use supybot.reply.oneToOne here, because you generally
|
|
|
|
# do not want @google to echo ~20 lines of results, even if you
|
|
|
|
# have reply.oneToOne enabled.
|
2019-08-24 17:50:05 +02:00
|
|
|
onetoone = self.registryValue('oneToOne', msg.channel, irc.network)
|
2016-05-04 19:05:03 +02:00
|
|
|
for result in self.formatData(data,
|
2012-07-30 16:57:52 +02:00
|
|
|
bold=bold, max=max, onetoone=onetoone):
|
|
|
|
irc.reply(result)
|
2005-02-01 13:09:12 +01:00
|
|
|
google = wrap(google, [getopts({'language':'something',
|
2009-02-09 06:41:16 +01:00
|
|
|
'filter':''}),
|
2008-08-15 01:20:06 +02:00
|
|
|
'text'])
|
2005-02-01 13:09:12 +01:00
|
|
|
|
2010-10-17 14:50:31 +02:00
|
|
|
@internationalizeDocstring
|
2005-02-01 13:09:12 +01:00
|
|
|
def cache(self, irc, msg, args, url):
|
|
|
|
"""<url>
|
|
|
|
|
|
|
|
Returns a link to the cached version of <url> if it is available.
|
|
|
|
"""
|
2019-08-24 17:50:05 +02:00
|
|
|
data = self.search(url, msg.channel, irc.network, {'smallsearch': True})
|
2016-05-04 19:05:03 +02:00
|
|
|
if data:
|
|
|
|
m = data[0]
|
2008-08-15 01:20:06 +02:00
|
|
|
if m['cacheUrl']:
|
|
|
|
url = m['cacheUrl'].encode('utf-8')
|
|
|
|
irc.reply(url)
|
|
|
|
return
|
2010-10-17 14:50:31 +02:00
|
|
|
irc.error(_('Google seems to have no cache for that site.'))
|
2005-02-01 13:09:12 +01:00
|
|
|
cache = wrap(cache, ['url'])
|
|
|
|
|
2016-05-04 19:05:03 +02:00
|
|
|
_fight_re = re.compile(r'id="resultStats"[^>]*>(?P<stats>[^<]*)')
|
2010-10-17 14:50:31 +02:00
|
|
|
@internationalizeDocstring
|
2005-02-01 13:09:12 +01:00
|
|
|
def fight(self, irc, msg, args):
|
|
|
|
"""<search string> <search string> [<search string> ...]
|
|
|
|
|
|
|
|
Returns the results of each search, in order, from greatest number
|
|
|
|
of results to least.
|
|
|
|
"""
|
2019-08-24 17:50:05 +02:00
|
|
|
channel = msg.channel
|
|
|
|
network = irc.network
|
2005-02-01 13:09:12 +01:00
|
|
|
results = []
|
|
|
|
for arg in args:
|
2019-08-24 17:50:05 +02:00
|
|
|
text = self.search(arg, channel, network, {'smallsearch': True})
|
2016-05-04 19:05:03 +02:00
|
|
|
i = text.find('id="resultStats"')
|
|
|
|
stats = utils.web.htmlToText(self._fight_re.search(text).group('stats'))
|
|
|
|
if stats == '':
|
|
|
|
results.append((0, args))
|
|
|
|
continue
|
|
|
|
count = ''.join(filter('0123456789'.__contains__, stats))
|
2008-08-15 01:20:06 +02:00
|
|
|
results.append((int(count), arg))
|
2005-02-01 13:09:12 +01:00
|
|
|
results.sort()
|
|
|
|
results.reverse()
|
2019-08-24 17:50:05 +02:00
|
|
|
if self.registryValue('bold', channel, network):
|
2005-02-01 13:09:12 +01:00
|
|
|
bold = ircutils.bold
|
|
|
|
else:
|
|
|
|
bold = repr
|
|
|
|
s = ', '.join([format('%s: %i', bold(s), i) for (i, s) in results])
|
|
|
|
irc.reply(s)
|
|
|
|
|
2015-07-04 01:42:01 +02:00
|
|
|
|
2015-09-09 21:24:47 +02:00
|
|
|
def _translate(self, sourceLang, targetLang, text):
|
2015-07-04 01:42:01 +02:00
|
|
|
headers = dict(utils.web.defaultHeaders)
|
|
|
|
headers['User-Agent'] = ('Mozilla/5.0 (X11; U; Linux i686) '
|
|
|
|
'Gecko/20071127 Firefox/2.0.0.11')
|
|
|
|
|
2015-08-10 17:55:25 +02:00
|
|
|
sourceLang = utils.web.urlquote(sourceLang)
|
|
|
|
targetLang = utils.web.urlquote(targetLang)
|
2015-07-04 01:42:01 +02:00
|
|
|
|
2015-08-10 17:55:25 +02:00
|
|
|
text = utils.web.urlquote(text)
|
2015-07-04 01:42:01 +02:00
|
|
|
|
|
|
|
result = utils.web.getUrlFd('http://translate.googleapis.com/translate_a/single'
|
|
|
|
'?client=gtx&dt=t&sl=%s&tl=%s&q='
|
|
|
|
'%s' % (sourceLang, targetLang, text),
|
|
|
|
headers).read().decode('utf8')
|
|
|
|
|
|
|
|
while ',,' in result:
|
|
|
|
result = result.replace(',,', ',null,')
|
|
|
|
while '[,' in result:
|
|
|
|
result = result.replace('[,', '[')
|
|
|
|
data = json.loads(result)
|
|
|
|
|
|
|
|
try:
|
|
|
|
language = data[2]
|
|
|
|
except:
|
|
|
|
language = 'unknown'
|
|
|
|
|
2015-10-03 00:06:18 +02:00
|
|
|
if data[0]:
|
|
|
|
return (''.join(x[0] for x in data[0]), language)
|
|
|
|
else:
|
|
|
|
return (_('No translations found.'), language)
|
2015-09-09 21:24:47 +02:00
|
|
|
|
|
|
|
@internationalizeDocstring
|
|
|
|
def translate(self, irc, msg, args, sourceLang, targetLang, text):
|
|
|
|
"""<source language> [to] <target language> <text>
|
|
|
|
|
|
|
|
Returns <text> translated from <source language> into <target
|
2016-07-15 20:48:26 +02:00
|
|
|
language>. <source language> and <target language> take language
|
|
|
|
codes (not language names), which are listed here:
|
2016-12-18 23:53:01 +01:00
|
|
|
https://cloud.google.com/translate/docs/languages
|
2015-09-09 21:24:47 +02:00
|
|
|
"""
|
|
|
|
(text, language) = self._translate(sourceLang, targetLang, text)
|
|
|
|
irc.reply(text, language)
|
2015-07-04 01:42:01 +02:00
|
|
|
translate = wrap(translate, ['something', 'to', 'something', 'text'])
|
|
|
|
|
2005-02-01 13:09:12 +01:00
|
|
|
def googleSnarfer(self, irc, msg, match):
|
|
|
|
r"^google\s+(.*)$"
|
2019-08-24 17:50:05 +02:00
|
|
|
if not self.registryValue('searchSnarfer', msg.channel, irc.network):
|
2005-02-01 13:09:12 +01:00
|
|
|
return
|
|
|
|
searchString = match.group(1)
|
2019-08-24 17:50:05 +02:00
|
|
|
data = self.search(searchString, msg.channel, irc.network,
|
|
|
|
{'smallsearch': True})
|
2008-10-01 22:17:51 +02:00
|
|
|
if data['responseData']['results']:
|
|
|
|
url = data['responseData']['results'][0]['unescapedUrl']
|
2013-02-24 01:51:40 +01:00
|
|
|
irc.reply(url, prefixNick=False)
|
2005-02-01 13:09:12 +01:00
|
|
|
googleSnarfer = urlSnarfer(googleSnarfer)
|
|
|
|
|
2019-08-24 17:50:05 +02:00
|
|
|
def _googleUrl(self, s, channel, network):
|
2015-08-10 17:55:25 +02:00
|
|
|
s = utils.web.urlquote_plus(s)
|
2013-05-21 18:40:38 +02:00
|
|
|
url = r'http://%s/search?q=%s' % \
|
2019-08-24 17:50:05 +02:00
|
|
|
(self.registryValue('baseUrl', channel, network), s)
|
2011-12-12 20:57:10 +01:00
|
|
|
return url
|
|
|
|
|
2013-11-29 16:09:37 +01:00
|
|
|
_calcRe1 = re.compile(r'<span class="cwcot".*?>(.*?)</span>', re.I)
|
|
|
|
_calcRe2 = re.compile(r'<div class="vk_ans.*?>(.*?)</div>', re.I | re.S)
|
|
|
|
_calcRe3 = re.compile(r'<div class="side_div" id="rhs_div">.*?<input class="ucw_data".*?value="(.*?)"', re.I)
|
2013-02-03 08:53:55 +01:00
|
|
|
@internationalizeDocstring
|
|
|
|
def calc(self, irc, msg, args, expr):
|
|
|
|
"""<expression>
|
|
|
|
|
|
|
|
Uses Google's calculator to calculate the value of <expression>.
|
|
|
|
"""
|
2019-08-24 17:50:05 +02:00
|
|
|
url = self._googleUrl(expr, msg.channel, irc.network)
|
2013-11-29 16:09:37 +01:00
|
|
|
h = {"User-Agent":"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36"}
|
2014-05-02 21:36:03 +02:00
|
|
|
html = utils.web.getUrl(url, headers=h).decode('utf8')
|
2013-02-03 08:53:55 +01:00
|
|
|
match = self._calcRe1.search(html)
|
2013-11-29 16:09:37 +01:00
|
|
|
if not match:
|
2013-02-03 08:53:55 +01:00
|
|
|
match = self._calcRe2.search(html)
|
2013-11-29 16:09:37 +01:00
|
|
|
if not match:
|
|
|
|
match = self._calcRe3.search(html)
|
|
|
|
if not match:
|
|
|
|
irc.reply("I could not find an output from Google Calc for: %s" % expr)
|
|
|
|
return
|
|
|
|
else:
|
|
|
|
s = match.group(1)
|
|
|
|
else:
|
|
|
|
s = match.group(1)
|
|
|
|
else:
|
2013-02-03 08:53:55 +01:00
|
|
|
s = match.group(1)
|
2013-11-29 16:09:37 +01:00
|
|
|
# do some cleanup of text
|
|
|
|
s = re.sub(r'<sup>(.*)</sup>⁄<sub>(.*)</sub>', r' \1/\2', s)
|
|
|
|
s = re.sub(r'<sup>(.*)</sup>', r'^\1', s)
|
|
|
|
s = utils.web.htmlToText(s)
|
|
|
|
irc.reply("%s = %s" % (expr, s))
|
2013-02-03 08:53:55 +01:00
|
|
|
calc = wrap(calc, ['text'])
|
|
|
|
|
|
|
|
_phoneRe = re.compile(r'Phonebook.*?<font size=-1>(.*?)<a href')
|
|
|
|
@internationalizeDocstring
|
|
|
|
def phonebook(self, irc, msg, args, phonenumber):
|
|
|
|
"""<phone number>
|
|
|
|
|
|
|
|
Looks <phone number> up on Google.
|
|
|
|
"""
|
2019-08-24 17:50:05 +02:00
|
|
|
url = self._googleUrl(phonenumber, msg.channel, irc.network)
|
2013-02-03 08:53:55 +01:00
|
|
|
html = utils.web.getUrl(url).decode('utf8')
|
|
|
|
m = self._phoneRe.search(html)
|
|
|
|
if m is not None:
|
|
|
|
s = m.group(1)
|
|
|
|
s = s.replace('<b>', '')
|
|
|
|
s = s.replace('</b>', '')
|
|
|
|
s = utils.web.htmlToText(s)
|
|
|
|
irc.reply(s)
|
|
|
|
else:
|
|
|
|
irc.reply(_('Google\'s phonebook didn\'t come up with anything.'))
|
|
|
|
phonebook = wrap(phonebook, ['text'])
|
|
|
|
|
2005-02-01 13:09:12 +01:00
|
|
|
|
|
|
|
Class = Google
|
|
|
|
|
|
|
|
|
2006-02-11 16:52:51 +01:00
|
|
|
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|