Forgot an import.

This commit is contained in:
Jeremy Fincher 2004-09-01 11:48:00 +00:00
parent 1b4c1f068a
commit e4f361d7ca

View File

@ -1,449 +1,484 @@
#!/usr/bin/env python #!/usr/bin/env python
### ###
# Copyright (c) 2002-2004, Jeremiah Fincher # Copyright (c) 2002-2004, Jeremiah Fincher
# All rights reserved. # All rights reserved.
# #
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met: # modification, are permitted provided that the following conditions are met:
# #
# * Redistributions of source code must retain the above copyright notice, # * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer. # this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, # * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the # this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution. # documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of # * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products # contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent. # derived from this software without specific prior written consent.
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE. # POSSIBILITY OF SUCH DAMAGE.
### ###
""" """
Provides several commands that go out to websites and get things. Provides several commands that go out to websites and get things.
""" """
import supybot

__revision__ = "$Id$"
# Maps supybot authors to the commands they contributed to this plugin.
__contributors__ = {
    supybot.authors.jemfinch: ['bender', 'cyborg', 'doctype', 'freshmeat',
                               'headers', 'netcraft', 'size', 'title'],
    supybot.authors.skorobeus: ['geekquote snarfer'],
    supybot.authors.jamessan: ['pgpkey', 'kernel', 'filext', 'zipinfo',
                               'acronym'],
    }
from itertools import imap, ifilter
import supybot.plugins as plugins
import supybot.conf as conf
import supybot.utils as utils import re
import supybot.webutils as webutils import sets
import supybot.privmsgs as privmsgs import getopt
import supybot.registry as registry import socket
import supybot.callbacks as callbacks import urllib
import xml.dom.minidom
class FreshmeatException(Exception): from itertools import imap, ifilter
pass
import supybot.conf as conf
class Http(callbacks.Privmsg): import supybot.utils as utils
threaded = True import supybot.webutils as webutils
_titleRe = re.compile(r'<title>(.*?)</title>', re.I | re.S) import supybot.privmsgs as privmsgs
def callCommand(self, method, irc, msg, *L): import supybot.registry as registry
try: import supybot.callbacks as callbacks
callbacks.Privmsg.callCommand(self, method, irc, msg, *L)
def configure(advanced):
    # Interactive first-load setup: registers the plugin and asks whether
    # the geekquote URL snarfer should be enabled by default.
    from supybot.questions import output, expect, anything, something, yn
    conf.registerPlugin('Http', True)
    output("""The Http plugin has the ability to watch for geekquote
              (bash.org) URLs and respond to them as though the user had
              asked for the geekquote by ID""")
    if yn('Do you want the Geekquote snarfer enabled by default?'):
        conf.supybot.plugins.Http.geekSnarfer.setValue(True)
"""
# Non-interactive registration: declare the plugin and its per-channel
# geekSnarfer toggle (off by default).
conf.registerPlugin('Http')
conf.registerChannelValue(conf.supybot.plugins.Http, 'geekSnarfer',
    registry.Boolean(False, """Determines whether the bot will automatically
    'snarf' Geekquote auction URLs and print information about them."""))
fd = webutils.getUrlFd(url)
class FreshmeatException(Exception):
    """Raised by Http.freshmeat when freshmeat.net returns an error page."""
    pass
irc.reply(s)
finally: class Http(callbacks.PrivmsgCommandAndRegexp):
fd.close() threaded = True
regexps = ['geekSnarfer']
_doctypeRe = re.compile(r'(<!DOCTYPE[^>]+>)', re.M)
def doctype(self, irc, msg, args): _titleRe = re.compile(r'<title>(.*?)</title>', re.I | re.S)
"""<url>
def callCommand(self, method, irc, msg, *L, **kwargs):
Returns the DOCTYPE string of <url>. Only HTTP urls are valid, of try:
course. super(Http, self).callCommand(method, irc, msg, *L, **kwargs)
""" except webutils.WebError, e:
url = privmsgs.getArgs(args) irc.error(str(e))
if not url.startswith('http://'):
irc.error('Only HTTP urls are valid.') def headers(self, irc, msg, args):
return """<url>
size = conf.supybot.protocols.http.peekSize()
s = webutils.getUrl(url, size=size) Returns the HTTP headers of <url>. Only HTTP urls are valid, of
m = self._doctypeRe.search(s) course.
if m: """
s = utils.normalizeWhitespace(m.group(0)) url = privmsgs.getArgs(args)
irc.reply(s) if not url.startswith('http://'):
else: irc.error('Only HTTP urls are valid.')
irc.reply('That URL has no specified doctype.') return
fd = webutils.getUrlFd(url)
def size(self, irc, msg, args): try:
"""<url> s = ', '.join(['%s: %s' % (k, v) for (k, v) in fd.headers.items()])
irc.reply(s)
Returns the Content-Length header of <url>. Only HTTP urls are valid, finally:
of course. fd.close()
"""
url = privmsgs.getArgs(args) _doctypeRe = re.compile(r'(<!DOCTYPE[^>]+>)', re.M)
if not url.startswith('http://'): def doctype(self, irc, msg, args):
irc.error('Only HTTP urls are valid.') """<url>
return
fd = webutils.getUrlFd(url) Returns the DOCTYPE string of <url>. Only HTTP urls are valid, of
try: course.
try: """
size = fd.headers['Content-Length'] url = privmsgs.getArgs(args)
irc.reply('%s is %s bytes long.' % (url, size)) if not url.startswith('http://'):
except KeyError: irc.error('Only HTTP urls are valid.')
size = conf.supybot.protocols.http.peekSize() return
s = fd.read(size) size = conf.supybot.protocols.http.peekSize()
if len(s) != size: s = webutils.getUrl(url, size=size)
irc.reply('%s is %s bytes long.' % (url, len(s))) m = self._doctypeRe.search(s)
else: if m:
irc.reply('The server didn\'t tell me how long %s is ' s = utils.normalizeWhitespace(m.group(0))
'but it\'s longer than %s bytes.' % (url, size)) irc.reply(s)
finally: else:
fd.close() irc.reply('That URL has no specified doctype.')
def title(self, irc, msg, args): def size(self, irc, msg, args):
"""<url> """<url>
Returns the HTML <title>...</title> of a URL. Returns the Content-Length header of <url>. Only HTTP urls are valid,
""" of course.
url = privmsgs.getArgs(args) """
if '://' not in url: url = privmsgs.getArgs(args)
url = 'http://%s' % url if not url.startswith('http://'):
size = conf.supybot.protocols.http.peekSize() irc.error('Only HTTP urls are valid.')
text = webutils.getUrl(url, size=size) return
m = self._titleRe.search(text) fd = webutils.getUrlFd(url)
if m is not None: try:
irc.reply(utils.htmlToText(m.group(1).strip())) try:
else: size = fd.headers['Content-Length']
irc.reply('That URL appears to have no HTML title ' irc.reply('%s is %s bytes long.' % (url, size))
'within the first %s bytes.' % size) except KeyError:
size = conf.supybot.protocols.http.peekSize()
def freshmeat(self, irc, msg, args): s = fd.read(size)
"""<project name> if len(s) != size:
irc.reply('%s is %s bytes long.' % (url, len(s)))
Returns Freshmeat data about a given project. else:
""" irc.reply('The server didn\'t tell me how long %s is '
project = privmsgs.getArgs(args) 'but it\'s longer than %s bytes.' % (url, size))
project = ''.join(project.split()) finally:
url = 'http://www.freshmeat.net/projects-xml/%s' % project fd.close()
try:
text = webutils.getUrl(url) def title(self, irc, msg, args):
if text.startswith('Error'): """<url>
text = text.split(None, 1)[1]
raise FreshmeatException, text Returns the HTML <title>...</title> of a URL.
dom = xml.dom.minidom.parseString(text) """
def getNode(name): url = privmsgs.getArgs(args)
node = dom.getElementsByTagName(name)[0] if '://' not in url:
return str(node.childNodes[0].data) url = 'http://%s' % url
project = getNode('projectname_full') size = conf.supybot.protocols.http.peekSize()
version = getNode('latest_release_version') text = webutils.getUrl(url, size=size)
vitality = getNode('vitality_percent') m = self._titleRe.search(text)
popularity = getNode('popularity_percent') if m is not None:
lastupdated = getNode('date_updated') irc.reply(utils.htmlToText(m.group(1).strip()))
irc.reply('%s, last updated %s, with a vitality percent of %s ' else:
'and a popularity of %s, is in version %s.' % irc.reply('That URL appears to have no HTML title '
(project, lastupdated, vitality, popularity, version)) 'within the first %s bytes.' % size)
except FreshmeatException, e:
irc.error(str(e)) def freshmeat(self, irc, msg, args):
"""<project name>
def stockquote(self, irc, msg, args):
"""<company symbol> Returns Freshmeat data about a given project.
"""
Gets the information about the current price and change from the project = privmsgs.getArgs(args)
previous day of a given company (represented by a stock symbol). project = ''.join(project.split())
""" url = 'http://www.freshmeat.net/projects-xml/%s' % project
symbol = privmsgs.getArgs(args) try:
if ' ' in symbol: text = webutils.getUrl(url)
irc.error('Only one stockquote can be looked up at a time.') if text.startswith('Error'):
return text = text.split(None, 1)[1]
url = 'http://finance.yahoo.com/d/quotes.csv?s=%s' \ raise FreshmeatException, text
'&f=sl1d1t1c1ohgv&e=.csv' % symbol dom = xml.dom.minidom.parseString(text)
quote = webutils.getUrl(url) def getNode(name):
data = quote.split(',') node = dom.getElementsByTagName(name)[0]
if data[1] != '0.00': return str(node.childNodes[0].data)
irc.reply('The current price of %s is %s, as of %s EST. ' project = getNode('projectname_full')
'A change of %s from the last business day.' % version = getNode('latest_release_version')
(data[0][1:-1], data[1], data[3][1:-1], data[4])) vitality = getNode('vitality_percent')
else: popularity = getNode('popularity_percent')
m = 'I couldn\'t find a listing for %s' % symbol lastupdated = getNode('date_updated')
irc.error(m) irc.reply('%s, last updated %s, with a vitality percent of %s '
'and a popularity of %s, is in version %s.' %
_mlgeekquotere = re.compile('<p class="qt">(.*?)</p>', re.M | re.DOTALL) (project, lastupdated, vitality, popularity, version))
def geekquote(self, irc, msg, args): except FreshmeatException, e:
"""[<id>] irc.error(str(e))
Returns a random geek quote from bash.org; the optional argument def stockquote(self, irc, msg, args):
id specifies which quote to retrieve. """<company symbol>
"""
id = privmsgs.getArgs(args, required=0, optional=1) Gets the information about the current price and change from the
if id: previous day of a given company (represented by a stock symbol).
try: """
id = int(id) symbol = privmsgs.getArgs(args)
except ValueError: if ' ' in symbol:
irc.error('Invalid id: %s' % id, Raise=True) irc.error('Only one stockquote can be looked up at a time.')
id = 'quote=%s' % id return
else: url = 'http://finance.yahoo.com/d/quotes.csv?s=%s' \
id = 'random' '&f=sl1d1t1c1ohgv&e=.csv' % symbol
html = webutils.getUrl('http://bash.org/?%s' % id) quote = webutils.getUrl(url)
m = self._mlgeekquotere.search(html) data = quote.split(',')
if m is None: if data[1] != '0.00':
irc.error('No quote found.') irc.reply('The current price of %s is %s, as of %s EST. '
return 'A change of %s from the last business day.' %
quote = utils.htmlToText(m.group(1)) (data[0][1:-1], data[1], data[3][1:-1], data[4]))
quote = ' // '.join(quote.splitlines()) else:
irc.reply(quote) m = 'I couldn\'t find a listing for %s' % symbol
irc.error(m)
_cyborgRe = re.compile(r'<p class="mediumheader">(.*?)</p>', re.I)
def cyborg(self, irc, msg, args): _mlgeekquotere = re.compile('<p class="qt">(.*?)</p>', re.M | re.DOTALL)
"""[<name>] def geekquote(self, irc, msg, args):
"""[<id>]
Returns a cyborg acronym for <name> from <http://www.cyborgname.com/>.
If <name> is not specified, uses that of the user. Returns a random geek quote from bash.org; the optional argument
""" id specifies which quote to retrieve.
name = privmsgs.getArgs(args, required=0, optional=1) """
if not name: id = privmsgs.getArgs(args, required=0, optional=1)
name = msg.nick if id:
name = urllib.quote(name) try:
url = 'http://www.cyborgname.com/cyborger.cgi?acronym=%s' % name id = int(id)
html = webutils.getUrl(url) except ValueError:
m = self._cyborgRe.search(html) irc.error('Invalid id: %s' % id, Raise=True)
if m: id = 'quote=%s' % id
s = m.group(1) else:
s = utils.normalizeWhitespace(s) id = 'random'
irc.reply(s) html = webutils.getUrl('http://bash.org/?%s' % id)
else: m = self._mlgeekquotere.search(html)
irc.errorPossibleBug('No cyborg name returned.') if m is None:
irc.error('No quote found on bash.org.')
_acronymre = re.compile(r'valign="middle" width="7\d%" bgcolor="[^"]+">' return
r'(?:<b>)?([^<]+)') quote = utils.htmlToText(m.group(1))
def acronym(self, irc, msg, args): quote = ' // '.join(quote.splitlines())
"""<acronym> irc.reply(quote)
Displays acronym matches from acronymfinder.com def geekSnarfer(self, irc, msg, match):
""" r"http://(?:www\.)?bash\.org/\?(\d+)"
acronym = privmsgs.getArgs(args) if not self.registryValue('geekSnarfer', msg.args[0]):
url = 'http://www.acronymfinder.com/' \ return
'af-query.asp?String=exact&Acronym=%s' % urllib.quote(acronym) id = match.group(1)
request = webutils.Request(url, headers={'User-agent': self.log.info('Snarfing geekquote %s.', id)
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0)'}) self.geekquote(irc, msg, [id])
html = webutils.getUrl(request) geekSnarfer = privmsgs.urlSnarfer(geekSnarfer)
if 'daily limit' in html:
s = 'Acronymfinder.com says I\'ve reached my daily limit. Sorry.' _cyborgRe = re.compile(r'<p class="mediumheader">(.*?)</p>', re.I)
irc.error(s) def cyborg(self, irc, msg, args):
return """[<name>]
# The following definitions are stripped and empties are removed.
defs = filter(None, imap(str.strip, self._acronymre.findall(html))) Returns a cyborg acronym for <name> from <http://www.cyborgname.com/>.
utils.sortBy(lambda s: not s.startswith('[not an acronym]'), defs) If <name> is not specified, uses that of the user.
for (i, s) in enumerate(defs): """
if s.startswith('[not an acronym]'): name = privmsgs.getArgs(args, required=0, optional=1)
defs[i] = s.split('is ', 1)[1] if not name:
if len(defs) == 0: name = msg.nick
irc.reply('No definitions found.') name = urllib.quote(name)
else: url = 'http://www.cyborgname.com/cyborger.cgi?acronym=%s' % name
s = ', or '.join(defs) html = webutils.getUrl(url)
irc.reply('%s could be %s' % (acronym, s)) m = self._cyborgRe.search(html)
if m:
_netcraftre = re.compile(r'td align="left">\s+<a[^>]+>(.*?)<a href', s = m.group(1)
re.S | re.I) s = utils.normalizeWhitespace(s)
def netcraft(self, irc, msg, args): irc.reply(s)
"""<hostname|ip> else:
irc.errorPossibleBug('No cyborg name returned.')
Returns Netcraft.com's determination of what operating system and
webserver is running on the host given. _acronymre = re.compile(r'valign="middle" width="7\d%" bgcolor="[^"]+">'
""" r'(?:<b>)?([^<]+)')
hostname = privmsgs.getArgs(args) def acronym(self, irc, msg, args):
url = 'http://uptime.netcraft.com/up/graph/?host=%s' % hostname """<acronym>
html = webutils.getUrl(url)
m = self._netcraftre.search(html) Displays acronym matches from acronymfinder.com
if m: """
html = m.group(1) acronym = privmsgs.getArgs(args)
s = utils.htmlToText(html, tagReplace='').strip() url = 'http://www.acronymfinder.com/' \
s = s.rstrip('-').strip() 'af-query.asp?String=exact&Acronym=%s' % urllib.quote(acronym)
irc.reply(s) # Snip off "the site" request = webutils.Request(url, headers={'User-agent':
elif 'We could not get any results' in html: 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0)'})
irc.reply('No results found for %s.' % hostname) html = webutils.getUrl(request)
else: if 'daily limit' in html:
irc.error('The format of page the was odd.') s = 'Acronymfinder.com says I\'ve reached my daily limit. Sorry.'
irc.error(s)
def kernel(self, irc, msg, args): return
"""takes no arguments # The following definitions are stripped and empties are removed.
defs = filter(None, imap(str.strip, self._acronymre.findall(html)))
Returns information about the current version of the Linux kernel. utils.sortBy(lambda s: not s.startswith('[not an acronym]'), defs)
""" for (i, s) in enumerate(defs):
fd = webutils.getUrlFd('http://kernel.org/kdist/finger_banner') if s.startswith('[not an acronym]'):
try: defs[i] = s.split('is ', 1)[1]
stable = 'unknown' if len(defs) == 0:
beta = 'unknown' irc.reply('No definitions found.')
for line in fd: else:
(name, version) = line.split(':') s = ', or '.join(defs)
if 'latest stable' in name: irc.reply('%s could be %s' % (acronym, s))
stable = version.strip()
elif 'latest beta' in name: _netcraftre = re.compile(r'td align="left">\s+<a[^>]+>(.*?)<a href',
beta = version.strip() re.S | re.I)
finally: def netcraft(self, irc, msg, args):
fd.close() """<hostname|ip>
irc.reply('The latest stable kernel is %s; '
'the latest beta kernel is %s.' % (stable, beta)) Returns Netcraft.com's determination of what operating system and
webserver is running on the host given.
_pgpkeyre = re.compile(r'pub\s+\d{4}\w/<a href="([^"]+)">' """
r'([^<]+)</a>[^>]+>([^<]+)</a>') hostname = privmsgs.getArgs(args)
def pgpkey(self, irc, msg, args): url = 'http://uptime.netcraft.com/up/graph/?host=%s' % hostname
"""<search words> html = webutils.getUrl(url)
m = self._netcraftre.search(html)
Returns the results of querying pgp.mit.edu for keys that match if m:
the <search words>. html = m.group(1)
""" s = utils.htmlToText(html, tagReplace='').strip()
search = privmsgs.getArgs(args) s = s.rstrip('-').strip()
urlClean = search.replace(' ', '+') irc.reply(s) # Snip off "the site"
host = 'http://pgp.mit.edu:11371' elif 'We could not get any results' in html:
url = '%s/pks/lookup?op=index&search=%s' % (host, urlClean) irc.reply('No results found for %s.' % hostname)
fd = webutils.getUrlFd(url, headers={}) else:
try: irc.error('The format of page the was odd.')
L = []
for line in iter(fd.next, ''): def kernel(self, irc, msg, args):
info = self._pgpkeyre.search(line) """takes no arguments
if info:
L.append('%s <%s%s>' % (info.group(3),host,info.group(1))) Returns information about the current version of the Linux kernel.
if len(L) == 0: """
irc.reply('No results found for %s.' % search) fd = webutils.getUrlFd('http://kernel.org/kdist/finger_banner')
else: try:
s = 'Matches found for %s: %s' % (search, ' :: '.join(L)) stable = 'unknown'
irc.reply(s) beta = 'unknown'
finally: for line in fd:
fd.close() (name, version) = line.split(':')
if 'latest stable' in name:
_filextre = re.compile( stable = version.strip()
r'<strong>Extension:</strong>.*?<tr>.*?</tr>\s+<tr>\s+<td colspan=' elif 'latest beta' in name:
r'"2">(?:<a href[^>]+>([^<]+)</a>\s+|([^<]+))</td>\s+<td>' beta = version.strip()
r'(?:<a href[^>]+>([^<]+)</a>|<img src="images/spacer.gif"(.))', finally:
re.I|re.S) fd.close()
def extension(self, irc, msg, args): irc.reply('The latest stable kernel is %s; '
"""<ext> 'the latest beta kernel is %s.' % (stable, beta))
Returns the results of querying filext.com for file extensions that _pgpkeyre = re.compile(r'pub\s+\d{4}\w/<a href="([^"]+)">'
match <ext>. r'([^<]+)</a>[^>]+>([^<]+)</a>')
""" def pgpkey(self, irc, msg, args):
ext = privmsgs.getArgs(args) """<search words>
invalid = '|<>\^=?/[]";,*'
for c in invalid: Returns the results of querying pgp.mit.edu for keys that match
if c in ext: the <search words>.
irc.error('\'%s\' is an invalid extension character' % c) """
return search = privmsgs.getArgs(args)
s = 'http://www.filext.com/detaillist.php?extdetail=%s&goButton=Go' urlClean = search.replace(' ', '+')
text = webutils.getUrl(s % ext) host = 'http://pgp.mit.edu:11371'
matches = self._filextre.findall(text) url = '%s/pks/lookup?op=index&search=%s' % (host, urlClean)
#print matches fd = webutils.getUrlFd(url, headers={})
res = [] try:
for match in matches: L = []
(file1, file2, comp1, comp2) = match for line in iter(fd.next, ''):
if file1: info = self._pgpkeyre.search(line)
filetype = file1.strip() if info:
else: L.append('%s <%s%s>' % (info.group(3),host,info.group(1)))
filetype = file2.strip() if len(L) == 0:
if comp1: irc.reply('No results found for %s.' % search)
company = comp1.strip() else:
else: s = 'Matches found for %s: %s' % (search, ' :: '.join(L))
company = comp2.strip() irc.reply(s)
if company: finally:
res.append('%s\'s %s' % (company, filetype)) fd.close()
else:
res.append(filetype) _filextre = re.compile(
if res: r'<strong>Extension:</strong>.*?<tr>.*?</tr>\s+<tr>\s+<td colspan='
irc.reply(utils.commaAndify(res)) r'"2">(?:<a href[^>]+>([^<]+)</a>\s+|([^<]+))</td>\s+<td>'
else: r'(?:<a href[^>]+>([^<]+)</a>|<img src="images/spacer.gif"(.))',
irc.error('No matching file extensions were found.') re.I|re.S)
def extension(self, irc, msg, args):
_zipinfore = re.compile(r'Latitude<BR>\(([^)]+)\)</th><th>Longitude<BR>' """<ext>
r'\(([^)]+)\).*?<tr>(.*?)</tr>', re.I)
_zipstatre = re.compile(r'(Only about \d+,\d{3} of.*?in use.)') Returns the results of querying filext.com for file extensions that
def zipinfo(self, irc, msg, args): match <ext>.
"""<zip code> """
ext = privmsgs.getArgs(args)
Returns a plethora of information for the given <zip code>. invalid = '|<>\^=?/[]";,*'
""" for c in invalid:
zipcode = privmsgs.getArgs(args) if c in ext:
try: irc.error('\'%s\' is an invalid extension character' % c)
int(zipcode) return
except ValueError: s = 'http://www.filext.com/detaillist.php?extdetail=%s&goButton=Go'
irc.error('Zip code must be a 5-digit integer.') text = webutils.getUrl(s % ext)
return matches = self._filextre.findall(text)
if len(zipcode) != 5: #print matches
irc.error('Zip code must be a 5-digit integer.') res = []
return for match in matches:
url = 'http://zipinfo.com/cgi-local/zipsrch.exe?cnty=cnty&ac=ac&'\ (file1, file2, comp1, comp2) = match
'tz=tz&ll=ll&zip=%s&Go=Go' % zipcode if file1:
text = webutils.getUrl(url) filetype = file1.strip()
if 'daily usage limit' in text: else:
irc.error('I have exceeded the site\'s daily usage limit.') filetype = file2.strip()
return if comp1:
m = self._zipstatre.search(text) company = comp1.strip()
if m: else:
irc.reply('%s %s is not one of them.' % (m.group(1), zipcode)) company = comp2.strip()
return if company:
n = self._zipinfore.search(text) res.append('%s\'s %s' % (company, filetype))
if not n: else:
irc.error('Unable to retrieve information for that zip code.') res.append(filetype)
return if res:
(latdir, longdir, rawinfo) = n.groups() irc.reply(utils.commaAndify(res))
# Info consists of the following (whitespace separated): else:
# City, State Abbrev., Zip Code, County, FIPS Code, Area Code, Time irc.error('No matching file extensions were found.')
# Zone, Daylight Time(?), Latitude, Longitude
info = utils.htmlToText(rawinfo) _zipinfore = re.compile(r'Latitude<BR>\(([^)]+)\)</th><th>Longitude<BR>'
info = info.split() r'\(([^)]+)\).*?<tr>(.*?)</tr>', re.I)
zipindex = info.index(zipcode) _zipstatre = re.compile(r'(Only about \d+,\d{3} of.*?in use.)')
resp = ['City: %s' % ' '.join(info[:zipindex-1]), def zipinfo(self, irc, msg, args):
'State: %s' % info[zipindex-1], """<zip code>
'County: %s' % ' '.join(info[zipindex+1:-6]),
'Area Code: %s' % info[-5], Returns a plethora of information for the given <zip code>.
'Time Zone: %s' % info[-4], """
'Daylight Savings: %s' % info[-3], zipcode = privmsgs.getArgs(args)
'Latitude: %s (%s)' % (info[-2], latdir), try:
'Longitude: %s (%s)' % (info[-1], longdir), int(zipcode)
] except ValueError:
irc.reply('; '.join(resp)) irc.error('Zip code must be a 5-digit integer.')
return
def bender(self, irc, msg, args): if len(zipcode) != 5:
"""takes no arguments irc.error('Zip code must be a 5-digit integer.')
return
Returns a random Bender (from Futurama) quote from Slashdot's HTTP url = 'http://zipinfo.com/cgi-local/zipsrch.exe?cnty=cnty&ac=ac&'\
headers. 'tz=tz&ll=ll&zip=%s&Go=Go' % zipcode
""" text = webutils.getUrl(url)
fd = webutils.getUrlFd('http://slashdot.org/') if 'daily usage limit' in text:
try: irc.error('I have exceeded the site\'s daily usage limit.')
if 'X-Bender' in fd.headers: return
irc.reply(fd.headers['X-Bender']) m = self._zipstatre.search(text)
else: if m:
irc.reply('Slashdot seems to be running low on Bender quotes.') irc.reply('%s %s is not one of them.' % (m.group(1), zipcode))
finally: return
fd.close() n = self._zipinfore.search(text)
if not n:
irc.error('Unable to retrieve information for that zip code.')
Class = Http return
(latdir, longdir, rawinfo) = n.groups()
# vim:set shiftwidth=4 tabstop=8 expandtab textwidth=78: # Info consists of the following (whitespace separated):
# City, State Abbrev., Zip Code, County, FIPS Code, Area Code, Time
# Zone, Daylight Time(?), Latitude, Longitude
info = utils.htmlToText(rawinfo)
info = info.split()
zipindex = info.index(zipcode)
resp = ['City: %s' % ' '.join(info[:zipindex-1]),
'State: %s' % info[zipindex-1],
'County: %s' % ' '.join(info[zipindex+1:-6]),
'Area Code: %s' % info[-5],
'Time Zone: %s' % info[-4],
'Daylight Savings: %s' % info[-3],
'Latitude: %s (%s)' % (info[-2], latdir),
'Longitude: %s (%s)' % (info[-1], longdir),
]
irc.reply('; '.join(resp))
def bender(self, irc, msg, args):
"""takes no arguments
Returns a random Bender (from Futurama) quote from Slashdot's HTTP
headers.
"""
fd = webutils.getUrlFd('http://slashdot.org/')
try:
if 'X-Bender' in fd.headers:
irc.reply(fd.headers['X-Bender'])
else:
irc.reply('Slashdot seems to be running low on Bender quotes.')
finally:
fd.close()
# The plugin-loader entry point: supybot instantiates this class.
Class = Http

# vim:set shiftwidth=4 tabstop=8 expandtab textwidth=78: