Someone committed this in dos file format. Let's purge that evil.

This commit is contained in:
James Vega 2004-11-08 17:41:13 +00:00
parent 6341b26755
commit beffc15c0d

View File

@ -1,411 +1,411 @@
### ###
# Copyright (c) 2002-2004, Jeremiah Fincher # Copyright (c) 2002-2004, Jeremiah Fincher
# All rights reserved. # All rights reserved.
# #
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met: # modification, are permitted provided that the following conditions are met:
# #
# * Redistributions of source code must retain the above copyright notice, # * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer. # this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, # * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the # this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution. # documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of # * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products # contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent. # derived from this software without specific prior written consent.
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE. # POSSIBILITY OF SUCH DAMAGE.
### ###
""" """
Provides several commands that go out to websites and get things. Provides several commands that go out to websites and get things.
""" """
import supybot import supybot
__revision__ = "$Id$" __revision__ = "$Id$"
__contributors__ = { __contributors__ = {
supybot.authors.jemfinch: ['bender', 'cyborg', 'doctype', 'freshmeat', supybot.authors.jemfinch: ['bender', 'cyborg', 'doctype', 'freshmeat',
'headers', 'netcraft', 'size', 'title'], 'headers', 'netcraft', 'size', 'title'],
supybot.authors.jamessan: ['pgpkey', 'kernel', 'filext', 'zipinfo', supybot.authors.jamessan: ['pgpkey', 'kernel', 'filext', 'zipinfo',
'acronym'], 'acronym'],
} }
import supybot.plugins as plugins import supybot.plugins as plugins
import re import re
import sets import sets
import getopt import getopt
import socket import socket
import urllib import urllib
import xml.dom.minidom import xml.dom.minidom
from itertools import imap from itertools import imap
import supybot.conf as conf import supybot.conf as conf
import supybot.utils as utils import supybot.utils as utils
from supybot.commands import * from supybot.commands import *
import supybot.webutils as webutils import supybot.webutils as webutils
import supybot.registry as registry import supybot.registry as registry
import supybot.callbacks as callbacks import supybot.callbacks as callbacks
conf.registerPlugin('Http') conf.registerPlugin('Http')
class FreshmeatException(Exception):
    """Raised when freshmeat.net returns an error page for a project query."""
    pass
class Http(callbacks.Privmsg): class Http(callbacks.Privmsg):
threaded = True threaded = True
_titleRe = re.compile(r'<title>(.*?)</title>', re.I | re.S) _titleRe = re.compile(r'<title>(.*?)</title>', re.I | re.S)
def __init__(self): def __init__(self):
self.__parent = super(Http, self) self.__parent = super(Http, self)
self.__parent.__init__() self.__parent.__init__()
def callCommand(self, name, irc, msg, *L, **kwargs): def callCommand(self, name, irc, msg, *L, **kwargs):
try: try:
self.__parent.callCommand(name, irc, msg, *L, **kwargs) self.__parent.callCommand(name, irc, msg, *L, **kwargs)
except webutils.WebError, e: except webutils.WebError, e:
irc.error(str(e)) irc.error(str(e))
def headers(self, irc, msg, args, url): def headers(self, irc, msg, args, url):
"""<url> """<url>
Returns the HTTP headers of <url>. Only HTTP urls are valid, of Returns the HTTP headers of <url>. Only HTTP urls are valid, of
course. course.
""" """
fd = webutils.getUrlFd(url) fd = webutils.getUrlFd(url)
try: try:
s = ', '.join(['%s: %s' % (k, v) for (k, v) in fd.headers.items()]) s = ', '.join(['%s: %s' % (k, v) for (k, v) in fd.headers.items()])
irc.reply(s) irc.reply(s)
finally: finally:
fd.close() fd.close()
headers = wrap(headers, ['httpUrl']) headers = wrap(headers, ['httpUrl'])
_doctypeRe = re.compile(r'(<!DOCTYPE[^>]+>)', re.M) _doctypeRe = re.compile(r'(<!DOCTYPE[^>]+>)', re.M)
def doctype(self, irc, msg, args, url): def doctype(self, irc, msg, args, url):
"""<url> """<url>
Returns the DOCTYPE string of <url>. Only HTTP urls are valid, of Returns the DOCTYPE string of <url>. Only HTTP urls are valid, of
course. course.
""" """
size = conf.supybot.protocols.http.peekSize() size = conf.supybot.protocols.http.peekSize()
s = webutils.getUrl(url, size=size) s = webutils.getUrl(url, size=size)
m = self._doctypeRe.search(s) m = self._doctypeRe.search(s)
if m: if m:
s = utils.normalizeWhitespace(m.group(0)) s = utils.normalizeWhitespace(m.group(0))
irc.reply(s) irc.reply(s)
else: else:
irc.reply('That URL has no specified doctype.') irc.reply('That URL has no specified doctype.')
doctype = wrap(doctype, ['httpUrl']) doctype = wrap(doctype, ['httpUrl'])
def size(self, irc, msg, args, url): def size(self, irc, msg, args, url):
"""<url> """<url>
Returns the Content-Length header of <url>. Only HTTP urls are valid, Returns the Content-Length header of <url>. Only HTTP urls are valid,
of course. of course.
""" """
fd = webutils.getUrlFd(url) fd = webutils.getUrlFd(url)
try: try:
try: try:
size = fd.headers['Content-Length'] size = fd.headers['Content-Length']
irc.reply('%s is %s bytes long.' % (url, size)) irc.reply('%s is %s bytes long.' % (url, size))
except KeyError: except KeyError:
size = conf.supybot.protocols.http.peekSize() size = conf.supybot.protocols.http.peekSize()
s = fd.read(size) s = fd.read(size)
if len(s) != size: if len(s) != size:
irc.reply('%s is %s bytes long.' % (url, len(s))) irc.reply('%s is %s bytes long.' % (url, len(s)))
else: else:
irc.reply('The server didn\'t tell me how long %s is ' irc.reply('The server didn\'t tell me how long %s is '
'but it\'s longer than %s bytes.' % (url, size)) 'but it\'s longer than %s bytes.' % (url, size))
finally: finally:
fd.close() fd.close()
size = wrap(size, ['httpUrl']) size = wrap(size, ['httpUrl'])
def title(self, irc, msg, args, url): def title(self, irc, msg, args, url):
"""<url> """<url>
Returns the HTML <title>...</title> of a URL. Returns the HTML <title>...</title> of a URL.
""" """
size = conf.supybot.protocols.http.peekSize() size = conf.supybot.protocols.http.peekSize()
text = webutils.getUrl(url, size=size) text = webutils.getUrl(url, size=size)
m = self._titleRe.search(text) m = self._titleRe.search(text)
if m is not None: if m is not None:
irc.reply(utils.htmlToText(m.group(1).strip())) irc.reply(utils.htmlToText(m.group(1).strip()))
else: else:
irc.reply('That URL appears to have no HTML title ' irc.reply('That URL appears to have no HTML title '
'within the first %s bytes.' % size) 'within the first %s bytes.' % size)
title = wrap(title, ['httpUrl']) title = wrap(title, ['httpUrl'])
def freshmeat(self, irc, msg, args, project): def freshmeat(self, irc, msg, args, project):
"""<project name> """<project name>
Returns Freshmeat data about a given project. Returns Freshmeat data about a given project.
""" """
project = ''.join(project.split()) project = ''.join(project.split())
url = 'http://www.freshmeat.net/projects-xml/%s' % project url = 'http://www.freshmeat.net/projects-xml/%s' % project
try: try:
text = webutils.getUrl(url) text = webutils.getUrl(url)
if text.startswith('Error'): if text.startswith('Error'):
text = text.split(None, 1)[1] text = text.split(None, 1)[1]
raise FreshmeatException, text raise FreshmeatException, text
dom = xml.dom.minidom.parseString(text) dom = xml.dom.minidom.parseString(text)
def getNode(name): def getNode(name):
node = dom.getElementsByTagName(name)[0] node = dom.getElementsByTagName(name)[0]
return str(node.childNodes[0].data) return str(node.childNodes[0].data)
project = getNode('projectname_full') project = getNode('projectname_full')
version = getNode('latest_release_version') version = getNode('latest_release_version')
vitality = getNode('vitality_percent') vitality = getNode('vitality_percent')
popularity = getNode('popularity_percent') popularity = getNode('popularity_percent')
lastupdated = getNode('date_updated') lastupdated = getNode('date_updated')
irc.reply('%s, last updated %s, with a vitality percent of %s ' irc.reply('%s, last updated %s, with a vitality percent of %s '
'and a popularity of %s, is in version %s.' % 'and a popularity of %s, is in version %s.' %
(project, lastupdated, vitality, popularity, version)) (project, lastupdated, vitality, popularity, version))
except FreshmeatException, e: except FreshmeatException, e:
irc.error(str(e)) irc.error(str(e))
freshmeat = wrap(freshmeat, ['text']) freshmeat = wrap(freshmeat, ['text'])
def stockquote(self, irc, msg, args, symbol): def stockquote(self, irc, msg, args, symbol):
"""<company symbol> """<company symbol>
Gets the information about the current price and change from the Gets the information about the current price and change from the
previous day of a given company (represented by a stock symbol). previous day of a given company (represented by a stock symbol).
""" """
url = 'http://finance.yahoo.com/d/quotes.csv?s=%s' \ url = 'http://finance.yahoo.com/d/quotes.csv?s=%s' \
'&f=sl1d1t1c1ohgv&e=.csv' % symbol '&f=sl1d1t1c1ohgv&e=.csv' % symbol
quote = webutils.getUrl(url) quote = webutils.getUrl(url)
data = quote.split(',') data = quote.split(',')
if data[1] != '0.00': if data[1] != '0.00':
irc.reply('The current price of %s is %s, as of %s EST. ' irc.reply('The current price of %s is %s, as of %s EST. '
'A change of %s from the last business day.' % 'A change of %s from the last business day.' %
(data[0][1:-1], data[1], data[3][1:-1], data[4])) (data[0][1:-1], data[1], data[3][1:-1], data[4]))
else: else:
m = 'I couldn\'t find a listing for %s' % symbol m = 'I couldn\'t find a listing for %s' % symbol
irc.error(m) irc.error(m)
stockquote = wrap(stockquote, ['something']) stockquote = wrap(stockquote, ['something'])
_cyborgRe = re.compile(r'<p class="mediumheader">(.*?)</p>', re.I) _cyborgRe = re.compile(r'<p class="mediumheader">(.*?)</p>', re.I)
def cyborg(self, irc, msg, args, name): def cyborg(self, irc, msg, args, name):
"""[<name>] """[<name>]
Returns a cyborg acronym for <name> from <http://www.cyborgname.com/>. Returns a cyborg acronym for <name> from <http://www.cyborgname.com/>.
If <name> is not specified, uses that of the user. If <name> is not specified, uses that of the user.
""" """
if not name: if not name:
name = msg.nick name = msg.nick
name = urllib.quote(name) name = urllib.quote(name)
url = 'http://www.cyborgname.com/cyborger.cgi?acronym=%s' % name url = 'http://www.cyborgname.com/cyborger.cgi?acronym=%s' % name
html = webutils.getUrl(url) html = webutils.getUrl(url)
m = self._cyborgRe.search(html) m = self._cyborgRe.search(html)
if m: if m:
s = m.group(1) s = m.group(1)
s = utils.normalizeWhitespace(s) s = utils.normalizeWhitespace(s)
irc.reply(s) irc.reply(s)
else: else:
irc.errorPossibleBug('No cyborg name returned.') irc.errorPossibleBug('No cyborg name returned.')
cyborg = wrap(cyborg, [additional('somethingWithoutSpaces')]) cyborg = wrap(cyborg, [additional('somethingWithoutSpaces')])
_acronymre = re.compile(r'valign="middle" width="7\d%" bgcolor="[^"]+">' _acronymre = re.compile(r'valign="middle" width="7\d%" bgcolor="[^"]+">'
r'(?:<b>)?([^<]+)') r'(?:<b>)?([^<]+)')
def acronym(self, irc, msg, args, acronym): def acronym(self, irc, msg, args, acronym):
"""<acronym> """<acronym>
Displays acronym matches from acronymfinder.com Displays acronym matches from acronymfinder.com
""" """
url = 'http://www.acronymfinder.com/' \ url = 'http://www.acronymfinder.com/' \
'af-query.asp?String=exact&Acronym=%s' % urllib.quote(acronym) 'af-query.asp?String=exact&Acronym=%s' % urllib.quote(acronym)
html = webutils.getUrl(url) html = webutils.getUrl(url)
if 'daily limit' in html: if 'daily limit' in html:
s = 'Acronymfinder.com says I\'ve reached my daily limit. Sorry.' s = 'Acronymfinder.com says I\'ve reached my daily limit. Sorry.'
irc.error(s) irc.error(s)
return return
# The following definitions are stripped and empties are removed. # The following definitions are stripped and empties are removed.
defs = filter(None, imap(str.strip, self._acronymre.findall(html))) defs = filter(None, imap(str.strip, self._acronymre.findall(html)))
utils.sortBy(lambda s: not s.startswith('[not an acronym]'), defs) utils.sortBy(lambda s: not s.startswith('[not an acronym]'), defs)
for (i, s) in enumerate(defs): for (i, s) in enumerate(defs):
if s.startswith('[not an acronym]'): if s.startswith('[not an acronym]'):
defs[i] = s.split('] ', 1)[1].strip() defs[i] = s.split('] ', 1)[1].strip()
if len(defs) == 0: if len(defs) == 0:
irc.reply('No definitions found.') irc.reply('No definitions found.')
else: else:
s = ', or '.join(defs) s = ', or '.join(defs)
irc.reply('%s could be %s' % (acronym, s)) irc.reply('%s could be %s' % (acronym, s))
acronym = wrap(acronym, ['text']) acronym = wrap(acronym, ['text'])
_netcraftre = re.compile(r'td align="left">\s+<a[^>]+>(.*?)<a href', _netcraftre = re.compile(r'td align="left">\s+<a[^>]+>(.*?)<a href',
re.S | re.I) re.S | re.I)
def netcraft(self, irc, msg, args, hostname): def netcraft(self, irc, msg, args, hostname):
"""<hostname|ip> """<hostname|ip>
Returns Netcraft.com's determination of what operating system and Returns Netcraft.com's determination of what operating system and
webserver is running on the host given. webserver is running on the host given.
""" """
url = 'http://uptime.netcraft.com/up/graph/?host=%s' % hostname url = 'http://uptime.netcraft.com/up/graph/?host=%s' % hostname
html = webutils.getUrl(url) html = webutils.getUrl(url)
m = self._netcraftre.search(html) m = self._netcraftre.search(html)
if m: if m:
html = m.group(1) html = m.group(1)
s = utils.htmlToText(html, tagReplace='').strip() s = utils.htmlToText(html, tagReplace='').strip()
s = s.rstrip('-').strip() s = s.rstrip('-').strip()
irc.reply(s) # Snip off "the site" irc.reply(s) # Snip off "the site"
elif 'We could not get any results' in html: elif 'We could not get any results' in html:
irc.reply('No results found for %s.' % hostname) irc.reply('No results found for %s.' % hostname)
else: else:
irc.error('The format of page the was odd.') irc.error('The format of page the was odd.')
netcraft = wrap(netcraft, ['text']) netcraft = wrap(netcraft, ['text'])
def kernel(self, irc, msg, args): def kernel(self, irc, msg, args):
"""takes no arguments """takes no arguments
Returns information about the current version of the Linux kernel. Returns information about the current version of the Linux kernel.
""" """
fd = webutils.getUrlFd('http://kernel.org/kdist/finger_banner') fd = webutils.getUrlFd('http://kernel.org/kdist/finger_banner')
try: try:
stable = 'unknown' stable = 'unknown'
snapshot = 'unknown' snapshot = 'unknown'
mm = 'unknown' mm = 'unknown'
for line in fd: for line in fd:
(name, version) = line.split(':') (name, version) = line.split(':')
if 'latest stable' in name: if 'latest stable' in name:
stable = version.strip() stable = version.strip()
elif 'snapshot for the stable' in name: elif 'snapshot for the stable' in name:
snapshot = version.strip() snapshot = version.strip()
elif '-mm patch' in name: elif '-mm patch' in name:
mm = version.strip() mm = version.strip()
finally: finally:
fd.close() fd.close()
irc.reply('The latest stable kernel is %s; ' irc.reply('The latest stable kernel is %s; '
'the latest snapshot of the stable kernel is %s; ' 'the latest snapshot of the stable kernel is %s; '
'the latest beta kernel is %s.' % (stable, snapshot, mm)) 'the latest beta kernel is %s.' % (stable, snapshot, mm))
kernel = wrap(kernel) kernel = wrap(kernel)
_pgpkeyre = re.compile(r'pub\s+\d{4}\w/<a href="([^"]+)">' _pgpkeyre = re.compile(r'pub\s+\d{4}\w/<a href="([^"]+)">'
r'([^<]+)</a>[^>]+>([^<]+)</a>') r'([^<]+)</a>[^>]+>([^<]+)</a>')
def pgpkey(self, irc, msg, args, search): def pgpkey(self, irc, msg, args, search):
"""<search words> """<search words>
Returns the results of querying pgp.mit.edu for keys that match Returns the results of querying pgp.mit.edu for keys that match
the <search words>. the <search words>.
""" """
urlClean = search.replace(' ', '+') urlClean = search.replace(' ', '+')
host = 'http://pgp.mit.edu:11371' host = 'http://pgp.mit.edu:11371'
url = '%s/pks/lookup?op=index&search=%s' % (host, urlClean) url = '%s/pks/lookup?op=index&search=%s' % (host, urlClean)
fd = webutils.getUrlFd(url, headers={}) fd = webutils.getUrlFd(url, headers={})
try: try:
L = [] L = []
for line in iter(fd.next, ''): for line in iter(fd.next, ''):
info = self._pgpkeyre.search(line) info = self._pgpkeyre.search(line)
if info: if info:
L.append('%s <%s%s>' % (info.group(3),host,info.group(1))) L.append('%s <%s%s>' % (info.group(3),host,info.group(1)))
if len(L) == 0: if len(L) == 0:
irc.reply('No results found for %s.' % search) irc.reply('No results found for %s.' % search)
else: else:
s = 'Matches found for %s: %s' % (search, ' :: '.join(L)) s = 'Matches found for %s: %s' % (search, ' :: '.join(L))
irc.reply(s) irc.reply(s)
finally: finally:
fd.close() fd.close()
pgpkey = wrap(pgpkey, ['text']) pgpkey = wrap(pgpkey, ['text'])
_filextre = re.compile( _filextre = re.compile(
r'<strong>Extension:</strong>.*?<tr>.*?</tr>\s+<tr>\s+<td colspan=' r'<strong>Extension:</strong>.*?<tr>.*?</tr>\s+<tr>\s+<td colspan='
r'"2">(?:<a href[^>]+>([^<]+)</a>\s+|([^<]+))</td>\s+<td>' r'"2">(?:<a href[^>]+>([^<]+)</a>\s+|([^<]+))</td>\s+<td>'
r'(?:<a href[^>]+>([^<]+)</a>|<img src="images/spacer.gif"(.))', r'(?:<a href[^>]+>([^<]+)</a>|<img src="images/spacer.gif"(.))',
re.I|re.S) re.I|re.S)
def extension(self, irc, msg, args, ext): def extension(self, irc, msg, args, ext):
"""<ext> """<ext>
Returns the results of querying filext.com for file extensions that Returns the results of querying filext.com for file extensions that
match <ext>. match <ext>.
""" """
# XXX This probably ought to be handled in a converter from commands.py # XXX This probably ought to be handled in a converter from commands.py
invalid = '|<>\^=?/[]";,*' invalid = '|<>\^=?/[]";,*'
for c in invalid: for c in invalid:
if c in ext: if c in ext:
irc.error('\'%s\' is an invalid extension character' % c) irc.error('\'%s\' is an invalid extension character' % c)
return return
s = 'http://www.filext.com/detaillist.php?extdetail=%s&goButton=Go' s = 'http://www.filext.com/detaillist.php?extdetail=%s&goButton=Go'
text = webutils.getUrl(s % ext) text = webutils.getUrl(s % ext)
matches = self._filextre.findall(text) matches = self._filextre.findall(text)
#print matches #print matches
res = [] res = []
for match in matches: for match in matches:
(file1, file2, comp1, comp2) = match (file1, file2, comp1, comp2) = match
if file1: if file1:
filetype = file1.strip() filetype = file1.strip()
else: else:
filetype = file2.strip() filetype = file2.strip()
if comp1: if comp1:
company = comp1.strip() company = comp1.strip()
else: else:
company = comp2.strip() company = comp2.strip()
if company: if company:
res.append('%s\'s %s' % (company, filetype)) res.append('%s\'s %s' % (company, filetype))
else: else:
res.append(filetype) res.append(filetype)
if res: if res:
irc.reply(utils.commaAndify(res)) irc.reply(utils.commaAndify(res))
else: else:
irc.error('No matching file extensions were found.') irc.error('No matching file extensions were found.')
extension = wrap(extension, ['text']) extension = wrap(extension, ['text'])
_zipinfore = re.compile(r'Latitude<BR>\(([^)]+)\)</th><th>Longitude<BR>' _zipinfore = re.compile(r'Latitude<BR>\(([^)]+)\)</th><th>Longitude<BR>'
r'\(([^)]+)\).*?<tr>(.*?)</tr>', re.I) r'\(([^)]+)\).*?<tr>(.*?)</tr>', re.I)
_zipstatre = re.compile(r'(Only about \d+,\d{3} of.*?in use.)') _zipstatre = re.compile(r'(Only about \d+,\d{3} of.*?in use.)')
def zipinfo(self, irc, msg, args, zipcode): def zipinfo(self, irc, msg, args, zipcode):
"""<zip code> """<zip code>
Returns a plethora of information for the given <zip code>. Returns a plethora of information for the given <zip code>.
""" """
try: try:
int(zipcode) int(zipcode)
except ValueError: except ValueError:
irc.error('Zip code must be a 5-digit integer.') irc.error('Zip code must be a 5-digit integer.')
return return
if len(zipcode) != 5: if len(zipcode) != 5:
irc.error('Zip code must be a 5-digit integer.') irc.error('Zip code must be a 5-digit integer.')
return return
url = 'http://zipinfo.com/cgi-local/zipsrch.exe?cnty=cnty&ac=ac&'\ url = 'http://zipinfo.com/cgi-local/zipsrch.exe?cnty=cnty&ac=ac&'\
'tz=tz&ll=ll&zip=%s&Go=Go' % zipcode 'tz=tz&ll=ll&zip=%s&Go=Go' % zipcode
text = webutils.getUrl(url) text = webutils.getUrl(url)
if 'daily usage limit' in text: if 'daily usage limit' in text:
irc.error('I have exceeded the site\'s daily usage limit.') irc.error('I have exceeded the site\'s daily usage limit.')
return return
m = self._zipstatre.search(text) m = self._zipstatre.search(text)
if m: if m:
irc.reply('%s %s is not one of them.' % (m.group(1), zipcode)) irc.reply('%s %s is not one of them.' % (m.group(1), zipcode))
return return
n = self._zipinfore.search(text) n = self._zipinfore.search(text)
if not n: if not n:
irc.error('Unable to retrieve information for that zip code.') irc.error('Unable to retrieve information for that zip code.')
return return
(latdir, longdir, rawinfo) = n.groups() (latdir, longdir, rawinfo) = n.groups()
# Info consists of the following (whitespace separated): # Info consists of the following (whitespace separated):
# City, State Abbrev., Zip Code, County, FIPS Code, Area Code, Time # City, State Abbrev., Zip Code, County, FIPS Code, Area Code, Time
# Zone, Daylight Time(?), Latitude, Longitude # Zone, Daylight Time(?), Latitude, Longitude
info = utils.htmlToText(rawinfo) info = utils.htmlToText(rawinfo)
info = info.split() info = info.split()
zipindex = info.index(zipcode) zipindex = info.index(zipcode)
resp = ['City: %s' % ' '.join(info[:zipindex-1]), resp = ['City: %s' % ' '.join(info[:zipindex-1]),
'State: %s' % info[zipindex-1], 'State: %s' % info[zipindex-1],
'County: %s' % ' '.join(info[zipindex+1:-6]), 'County: %s' % ' '.join(info[zipindex+1:-6]),
'Area Code: %s' % info[-5], 'Area Code: %s' % info[-5],
'Time Zone: %s' % info[-4], 'Time Zone: %s' % info[-4],
'Daylight Savings: %s' % info[-3], 'Daylight Savings: %s' % info[-3],
'Latitude: %s (%s)' % (info[-2], latdir), 'Latitude: %s (%s)' % (info[-2], latdir),
'Longitude: %s (%s)' % (info[-1], longdir), 'Longitude: %s (%s)' % (info[-1], longdir),
] ]
irc.reply('; '.join(resp)) irc.reply('; '.join(resp))
zipinfo = wrap(zipinfo, ['text']) zipinfo = wrap(zipinfo, ['text'])
# supybot loads the plugin through the module-level `Class` attribute.
Class = Http

# vim:set shiftwidth=4 tabstop=8 expandtab textwidth=78: