Mirror of https://github.com/Mikaela/Limnoria.git (synced 2024-12-23 19:22:45 +01:00)
Merge branch 'py3k-backport' into testing
This branch provides support for Python 3 via 2to3 (without dropping Python 2 support).
commit 45bf9db03c
2to3/fix_def_iteritems.py (new file, 22 lines)
@@ -0,0 +1,22 @@
"""Fixer for iteritems -> items methods."""
# Author: Valentin Lorentz

# Code modified from fix_nonzero by Collin Winter

from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, syms

class FixDefIteritems(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def' name='iteritems'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    """

    def transform(self, node, results):
        name = results["name"]
        new = Name(u"items", prefix=name.prefix)
        name.replace(new)
2to3/fix_def_iterkeys.py (new file, 22 lines)
@@ -0,0 +1,22 @@
"""Fixer for iterkeys -> keys methods."""
# Author: Valentin Lorentz

# Code modified from fix_nonzero by Collin Winter

from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, syms

class FixDefIterkeys(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def' name='iterkeys'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    """

    def transform(self, node, results):
        name = results["name"]
        new = Name(u"keys", prefix=name.prefix)
        name.replace(new)
2to3/fix_def_itervalues.py (new file, 22 lines)
@@ -0,0 +1,22 @@
"""Fixer for itervalues -> values methods."""
# Author: Valentin Lorentz

# Code modified from fix_nonzero by Collin Winter

from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, syms

class FixDefItervalues(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def' name='itervalues'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    """

    def transform(self, node, results):
        name = results["name"]
        new = Name(u"values", prefix=name.prefix)
        name.replace(new)
2to3/fix_reload.py (new file, 27 lines)
@@ -0,0 +1,27 @@
# Based on fix_intern.py. Original copyright:
# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for intern().

intern(s) -> sys.intern(s)"""

# Local imports
from lib2to3 import pytree
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, Attr, touch_import


class FixReload(fixer_base.BaseFix):
    BM_compatible = True
    order = "pre"

    PATTERN = """
    power< 'reload'
           after=any*
    >
    """

    def transform(self, node, results):
        touch_import(u'imp', u'reload', node)
        return node
2to3/run.py (new file, 13 lines)
@@ -0,0 +1,13 @@
#! /usr/bin/python2.7
import sys
from lib2to3.main import main

import fix_def_iteritems, fix_def_itervalues, fix_def_iterkeys, fix_reload

# Hacks
sys.modules['lib2to3.fixes.fix_def_iteritems'] = fix_def_iteritems
sys.modules['lib2to3.fixes.fix_def_itervalues'] = fix_def_itervalues
sys.modules['lib2to3.fixes.fix_def_iterkeys'] = fix_def_iterkeys
sys.modules['lib2to3.fixes.fix_reload'] = fix_reload

sys.exit(main("lib2to3.fixes"))
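As a rough sketch of how one of these custom fixers can be exercised through lib2to3, using the same sys.modules registration trick as run.py above (the sample class `Foo` and the driver script itself are illustrative assumptions, not part of the commit):

#!/usr/bin/env python
# Sketch: run the iteritems fixer over a string. Assumes the current
# directory is 2to3/, so fix_def_iteritems is importable.
import sys

import fix_def_iteritems
# Same registration hack as run.py: expose the fixer under lib2to3.fixes.
sys.modules['lib2to3.fixes.fix_def_iteritems'] = fix_def_iteritems

from lib2to3.refactor import RefactoringTool

source = (
    "class Foo(object):\n"
    "    def iteritems(self):\n"
    "        return iter(self.data.items())\n"
)
tool = RefactoringTool(['lib2to3.fixes.fix_def_iteritems'])
# The 'def iteritems' definition is renamed to 'def items'; call sites are
# handled by the stock fix_dict fixer when the full 2to3 pass runs.
print(tool.refactor_string(source, '<example>'))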
locale/fr.py
@@ -37,40 +37,40 @@ def pluralize(s):
(The repeated lines in this hunk are textually identical before and after the change; only their indentation was adjusted.)
    """
    lowered = s.lower()
    if lowered.endswith('ou') and \
            lowered in ['bijou', 'caillou', 'chou', 'genou', 'hibou', 'joujou',
                        'pou']:
        return s + 'x'
    elif lowered.endswith('al') and \
            lowered not in ['bal', 'carnaval', 'chacal', 'festival', 'récital',
                            'régal', 'cal', 'étal', 'aval', 'caracal', 'val', 'choral',
                            'corral', 'galgal', 'gayal']:
        return s[0:-2] + 'aux'
    elif lowered.endswith('ail') and \
            lowered not in ['bail', 'corail', 'émail', 'soupirail', 'travail',
                            'ventail', 'vitrail', 'aspirail', 'fermail']:
        return s[0:-3] + 'aux'
    elif lowered.endswith('eau'):
        return s + 'x'
    elif lowered == 'pare-feu':
        return s
    elif lowered.endswith('eu') and \
            lowered not in ['bleu', 'pneu', 'émeu', 'enfeu']:
        # Note: when 'lieu' is a fish, it has a 's' ; else, it has a 'x'
        return s + 'x'
    else:
        return s + 's'

def depluralize(s):
    """Returns the singular of s."""
    lowered = s.lower()
    if lowered.endswith('aux') and \
            lowered in ['baux', 'coraux', 'émaux', 'soupiraux', 'travaux',
                        'ventaux', 'vitraux', 'aspiraux', 'fermaux']:
        return s[0:-3] + 'ail'
    elif lowered.endswith('aux'):
        return s[0:-3] + 'al'
    else:
        return s[0:-1]

def ordinal(i):
    """Returns i + the ordinal indicator for the number.

@@ -79,9 +79,9 @@ def ordinal(i):
    """
    i = int(i)
    if i == 1:
        return '1er'
    else:
        return '%sème' % i

def be(i):
    """Returns the form of the verb 'être' based on the number i."""
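A few illustrative checks of the pluralization rules above; `fr` is a stand-in name for importing locale/fr.py as a module and is not part of the commit:

# Hypothetical import of locale/fr.py as a module named 'fr'.
import fr

assert fr.pluralize('bijou') == 'bijoux'      # in the -ou exception list
assert fr.pluralize('cheval') == 'chevaux'    # -al -> -aux
assert fr.pluralize('bleu') == 'bleus'        # -eu exception falls back to plain 's'
assert fr.depluralize('chevaux') == 'cheval'  # -aux -> -al
assert fr.ordinal(1) == '1er'
assert fr.ordinal(3) == '3ème'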
@@ -29,7 +29,7 @@
 ###

 import re
-import new
+import types

 import supybot.conf as conf
 import supybot.utils as utils
@@ -329,7 +329,7 @@ class Alias(callbacks.Plugin):
             raise AliasError, format('Alias %q is locked.', name)
         try:
             f = makeNewAlias(name, alias)
-            f = new.instancemethod(f, self, Alias)
+            f = types.MethodType(f, self)
         except RecursiveAlias:
             raise AliasError, 'You can\'t define a recursive alias.'
         if '.' in name or '|' in name:
@@ -33,7 +33,7 @@ import supybot.conf as conf
 import supybot.plugin as plugin
 import supybot.registry as registry

-Alias = plugin.loadPluginModule('Alias')
+import plugin as Alias

 class FunctionsTest(SupyTestCase):
     def testFindBiggestDollar(self):
@@ -27,6 +27,8 @@
 # POSSIBILITY OF SUCH DAMAGE.
 ###

+from __future__ import division
+
 import time

 import supybot.conf as conf
@@ -64,7 +66,7 @@ conf.registerChannelValue(BadWords,'requireWordBoundaries',
 class String256(registry.String):
     def __call__(self):
         s = registry.String.__call__(self)
-        return s * (1024/len(s))
+        return s * (1024//len(s))

     def __str__(self):
         return self.value
@@ -148,7 +148,7 @@ class ChannelLogger(callbacks.Plugin):
         try:
             name = self.getLogName(channel)
             logDir = self.getLogDir(irc, channel)
-            log = file(os.path.join(logDir, name), 'a')
+            log = open(os.path.join(logDir, name), 'a')
             logs[channel] = log
             return log
         except IOError:
@@ -297,6 +297,7 @@ class ChannelStats(callbacks.Plugin):
                                                          name, channel))
     stats = wrap(stats, ['channeldb', additional('something')])

+    _calc_match_forbidden_chars = re.compile('[_[\]]')
     _env = {'__builtins__': types.ModuleType('__builtins__')}
     _env.update(math.__dict__)
     @internationalizeDocstring
@@ -311,7 +312,7 @@ class ChannelStats(callbacks.Plugin):
         """
         # XXX I could do this the right way, and abstract out a safe eval,
         # or I could just copy/paste from the Math plugin.
-        if expr != expr.translate(utils.str.chars, '_[]'):
+        if self._calc_match_forbidden_chars.match(expr):
            irc.error(_('There\'s really no reason why you should have '
                        'underscores or brackets in your mathematical '
                        'expression. Please remove them.'), Raise=True)
@@ -45,15 +45,15 @@ class Connection:
     def __init__(self, hostname = 'localhost', port = 2628):
         self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         self.sock.connect((hostname, port))
-        self.rfile = self.sock.makefile("rt")
-        self.wfile = self.sock.makefile("wt", 0)
+        self.rfile = self.sock.makefile("rb")
+        self.wfile = self.sock.makefile("wb", 0)
         self.saveconnectioninfo()

     def getresultcode(self):
         """Generic function to get a result code. It will return a list
         consisting of two items: the integer result code and the text
         following. You will not usually use this function directly."""
-        line = self.rfile.readline().strip()
+        line = self.rfile.readline().decode('utf8').strip()
         code, text = line.split(' ', 1)
         return [int(code), text]

@@ -72,7 +72,7 @@ class Connection:
         part only. Does not get any codes or anything! Returns a string."""
         data = []
         while 1:
-            line = self.rfile.readline().strip()
+            line = self.rfile.readline().decode('ascii').strip()
             if line == '.':
                 break
             data.append(line)
@@ -163,7 +163,7 @@ class Connection:
     def sendcommand(self, command):
         """Takes a command, without a newline character, and sends it to
         the server."""
-        self.wfile.write(command + "\n")
+        self.wfile.write(command.encode('ascii') + b"\n")

     def define(self, database, word):
         """Returns a list of Definition objects for each matching
@@ -649,7 +649,7 @@ class Factoids(callbacks.Plugin, plugins.ChannelDBHandler):
     change = wrap(change, ['channel', 'something',
                            'factoidId', 'regexpReplacer'])

-    _sqlTrans = string.maketrans('*?', '%_')
+    _sqlTrans = utils.str.MultipleReplacer({'*': '%', '?': '_'})
     @internationalizeDocstring
     def search(self, irc, msg, args, channel, optlist, globs):
         """[<channel>] [--values] [--{regexp} <value>] [<glob> ...]
@@ -681,7 +681,7 @@ class Factoids(callbacks.Plugin, plugins.ChannelDBHandler):
             predicateName += 'p'
         for glob in globs:
             criteria.append('TARGET LIKE ?')
-            formats.append(glob.translate(self._sqlTrans))
+            formats.append(self._sqlTrans(glob))
         cursor = db.cursor()
         sql = """SELECT keys.key FROM %s WHERE %s""" % \
               (', '.join(tables), ' AND '.join(criteria))
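Several hunks in this commit (Factoids above; Filter, Format, and Nickometer below) replace `string.maketrans`/`str.translate`, which change between Python 2 and 3, with a callable `utils.str.MultipleReplacer`. A minimal sketch of such a helper, inferred only from the call pattern visible in the diff (supybot's real implementation may differ):

import re

class MultipleReplacer(object):
    """Callable that replaces every key of `replacements` with its value."""
    def __init__(self, replacements):
        self._replacements = dict(replacements)
        # Sort longest-first so longer keys win over their prefixes.
        keys = sorted(self._replacements, key=len, reverse=True)
        self._regexp = re.compile('|'.join(map(re.escape, keys)))

    def __call__(self, text):
        return self._regexp.sub(
            lambda m: self._replacements[m.group(0)], text)

# Example matching the Factoids hunk above:
sqlTrans = MultipleReplacer({'*': '%', '?': '_'})
assert sqlTrans('foo*bar?') == 'foo%bar_'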
@@ -29,6 +29,7 @@
 ###

 import re
+import codecs
 import string
 import random
 from cStringIO import StringIO
@@ -102,6 +103,7 @@ class Filter(callbacks.Plugin):
                                  [('checkChannelCapability', 'op'),
                                   additional('commandName')])

+    _hebrew_remover = utils.str.MultipleRemover('aeiou')
    @internationalizeDocstring
    def hebrew(self, irc, msg, args, text):
        """<text>
@@ -110,8 +112,7 @@ class Filter(callbacks.Plugin):
         named 'hebrew' it's because I (jemfinch) thought of it in Hebrew class,
         and printed Hebrew often elides the vowels.)
         """
-        text = filter(lambda c: c not in 'aeiou', text)
-        irc.reply(text)
+        irc.reply(self._hebrew_remover(text))
     hebrew = wrap(hebrew, ['text'])

    @internationalizeDocstring
@@ -174,6 +175,7 @@ class Filter(callbacks.Plugin):
         irc.reply(''.join(L))
     unbinary = wrap(unbinary, ['text'])

+    _hex_encoder = staticmethod(codecs.getencoder('hex_codec'))
    @internationalizeDocstring
    def hexlify(self, irc, msg, args, text):
        """<text>
@@ -181,9 +183,10 @@ class Filter(callbacks.Plugin):
         Returns a hexstring from the given string; a hexstring is a string
         composed of the hexadecimal value of each character in the string
         """
-        irc.reply(text.encode('hex_codec'))
+        irc.reply(self._hex_encoder(text.encode('utf8'))[0].decode('utf8'))
     hexlify = wrap(hexlify, ['text'])

+    _hex_decoder = staticmethod(codecs.getdecoder('hex_codec'))
    @internationalizeDocstring
    def unhexlify(self, irc, msg, args, text):
        """<hexstring>
@@ -192,11 +195,12 @@ class Filter(callbacks.Plugin):
         <hexstring> must be a string of hexadecimal digits.
         """
         try:
-            irc.reply(text.decode('hex_codec'))
+            irc.reply(self._hex_decoder(text.encode('utf8'))[0].decode('utf8'))
         except TypeError:
             irc.error(_('Invalid input.'))
     unhexlify = wrap(unhexlify, ['text'])

+    _rot13_encoder = codecs.getencoder('rot-13')
    @internationalizeDocstring
    def rot13(self, irc, msg, args, text):
        """<text>
@@ -205,7 +209,7 @@ class Filter(callbacks.Plugin):
         commonly used for text that simply needs to be hidden from inadvertent
         reading by roaming eyes, since it's easily reversible.
         """
-        irc.reply(text.encode('rot13'))
+        irc.reply(self._rot13_encoder(text)[0])
     rot13 = wrap(rot13, ['text'])

    @internationalizeDocstring
@@ -232,7 +236,8 @@ class Filter(callbacks.Plugin):
         irc.reply(text)
     lithp = wrap(lithp, ['text'])

-    _leettrans = string.maketrans('oOaAeElBTiIts', '004433187!1+5')
+    _leettrans = utils.str.MultipleReplacer(dict(zip('oOaAeElBTiIts',
+                                                     '004433187!1+5')))
     _leetres = [(re.compile(r'\b(?:(?:[yY][o0O][oO0uU])|u)\b'), 'j00'),
                 (re.compile(r'fear'), 'ph33r'),
                 (re.compile(r'[aA][tT][eE]'), '8'),
@@ -247,7 +252,7 @@ class Filter(callbacks.Plugin):
         """
         for (r, sub) in self._leetres:
             text = re.sub(r, sub, text)
-        text = text.translate(self._leettrans)
+        text = self._leettrans(text)
         irc.reply(text)
     leet = wrap(leet, ['text'])

@@ -648,14 +653,14 @@ class Filter(callbacks.Plugin):
         irc.reply(text)
     shrink = wrap(shrink, ['text'])

-    _azn_trans = string.maketrans('rlRL', 'lrLR')
+    _azn_trans = utils.str.MultipleReplacer(dict(zip('rlRL', 'lrLR')))
    @internationalizeDocstring
    def azn(self, irc, msg, args, text):
        """<text>

         Returns <text> with the l's made into r's and r's made into l's.
         """
-        text = text.translate(self._azn_trans)
+        text = self._azn_trans(text)
         irc.reply(text)
     azn = wrap(azn, ['text'])
@@ -30,6 +30,7 @@
 from supybot.test import *

 import re
+import codecs

 import supybot.utils as utils
 import supybot.callbacks as callbacks
@@ -134,8 +135,9 @@ class FilterTest(ChannelPluginTestCase):
         self.assertResponse('spellit asasdfasdf12345@#$!%^',
                             'asasdfasdf12345@#$!%^')

+    _rot13_encoder = codecs.getencoder('rot-13')
     def testOutfilter(self):
-        s = self.nick.encode('rot13')
+        s = self._rot13_encoder(self.nick)[0]
         self.assertNotError('outfilter rot13')
         self.assertResponse('rot13 foobar', '%s: foobar' % s)
         self.assertNotError('outfilter rot13')
@@ -148,7 +150,7 @@ class FilterTest(ChannelPluginTestCase):
         self.assertResponse('rot13 foobar', 'sbbone')

     def testOutfilterAction(self):
-        s = self.nick.encode('rot13')
+        s = self._rot13_encoder(self.nick)[0]
         self.assertNotError('outfilter rot13')
         self.assertResponse('rot13 foobar', '%s: foobar' % s)
         m = self.getMsg('action foobar')
@@ -95,7 +95,7 @@ class Format(callbacks.Plugin):
         if len(bad) != len(good):
             irc.error(_('<chars to translate> must be the same length as '
                         '<chars to replace those with>.'), Raise=True)
-        irc.reply(text.translate(string.maketrans(bad, good)))
+        irc.reply(utils.str.MultipleReplacer(dict(zip(bad, good)))(text))
     translate = wrap(translate, ['something', 'something', 'text'])

     @internationalizeDocstring
@@ -30,6 +30,7 @@

 import re
 import random
+from itertools import imap

 import supybot.utils as utils
 from supybot.commands import *
@@ -61,7 +62,7 @@ class Games(callbacks.Plugin):
         For example, 2d6 will roll 2 six-sided dice; 10d10 will roll 10
         ten-sided dice.
         """
-        (dice, sides) = utils.iter.imap(int, m.groups())
+        (dice, sides) = imap(int, m.groups())
         if dice > 1000:
             irc.error(_('You can\'t roll more than 1000 dice.'))
         elif sides > 100:
@@ -44,26 +44,7 @@ import supybot.callbacks as callbacks
 from supybot.i18n import PluginInternationalization, internationalizeDocstring
 _ = PluginInternationalization('Google')

-simplejson = None
-
-try:
-    simplejson = utils.python.universalImport('json')
-except ImportError:
-    pass
-
-try:
-    # The 3rd party simplejson module was included in Python 2.6 and renamed to
-    # json. Unfortunately, this conflicts with the 3rd party json module.
-    # Luckily, the 3rd party json module has a different interface so we test
-    # to make sure we aren't using it.
-    if simplejson is None or hasattr(simplejson, 'read'):
-        simplejson = utils.python.universalImport('simplejson',
-                                                  'local.simplejson')
-except ImportError:
-    raise callbacks.Error, \
-            'You need Python2.6 or the simplejson module installed to use ' \
-            'this plugin. Download the module at ' \
-            '<http://undefined.org/python/#simplejson>.'
+import json

 class Google(callbacks.PluginRegexp):
     threaded = True
@@ -132,14 +113,13 @@ class Google(callbacks.PluginRegexp):
         if 'rsz' not in opts:
             opts['rsz'] = 'large'

-        fd = utils.web.getUrlFd('%s?%s' % (self._gsearchUrl,
+        text = utils.web.getUrl('%s?%s' % (self._gsearchUrl,
                                            urllib.urlencode(opts)),
-                                headers)
-        json = simplejson.load(fd)
-        fd.close()
-        if json['responseStatus'] != 200:
+                                headers=headers).decode('utf8')
+        data = json.loads(text)
+        if data['responseStatus'] != 200:
             raise callbacks.Error, _('We broke The Google!')
-        return json
+        return data

     def formatData(self, data, bold=True, max=0, onetoone=False):
         if isinstance(data, basestring):
@@ -174,9 +154,9 @@ class Google(callbacks.PluginRegexp):
         opts = dict(opts)
         data = self.search(text, msg.args[0], {'smallsearch': True})
         if data['responseData']['results']:
-            url = data['responseData']['results'][0]['unescapedUrl'].encode('utf-8')
+            url = data['responseData']['results'][0]['unescapedUrl']
             if opts.has_key('snippet'):
-                snippet = data['responseData']['results'][0]['content'].encode('utf-8')
+                snippet = data['responseData']['results'][0]['content']
                 snippet = " | " + utils.web.htmlToText(snippet, tagReplace='')
             else:
                 snippet = ""
@@ -288,7 +268,7 @@ class Google(callbacks.PluginRegexp):
         Uses Google's calculator to calculate the value of <expression>.
         """
         urlig = self._googleUrlIG(expr)
-        js = utils.web.getUrl(urlig)
+        js = utils.web.getUrl(urlig).decode('utf8')
         # fix bad google json
         js = js \
                 .replace('lhs:','"lhs":') \
@@ -296,7 +276,7 @@ class Google(callbacks.PluginRegexp):
                 .replace('error:','"error":') \
                 .replace('icc:','"icc":') \
                 .replace('\\', '\\\\')
-        js = simplejson.loads(js)
+        js = json.loads(js)

         # Currency conversion
         if js['icc'] == True:
@@ -304,7 +284,7 @@ class Google(callbacks.PluginRegexp):
             return

         url = self._googleUrl(expr)
-        html = utils.web.getUrl(url)
+        html = utils.web.getUrl(url).decode('utf8')
         match = self._calcRe1.search(html)
         if match is None:
             match = self._calcRe2.search(html)
@@ -328,7 +308,7 @@ class Google(callbacks.PluginRegexp):
         Looks <phone number> up on Google.
         """
         url = self._googleUrl(phonenumber)
-        html = utils.web.getUrl(url)
+        html = utils.web.getUrl(url).decode('utf8')
         m = self._phoneRe.search(html)
         if m is not None:
             s = m.group(1)
@@ -83,12 +83,12 @@ class Internet(callbacks.Plugin):
         except socket.error, e:
             irc.error(str(e))
             return
-        t.write(domain)
-        t.write('\r\n')
+        t.write(domain.encode('ascii'))
+        t.write(b'\r\n')
         s = t.read_all()
         server = registrar = updated = created = expires = status = ''
         for line in s.splitlines():
-            line = line.strip()
+            line = line.decode('ascii').strip()
             if not line or ':' not in line:
                 continue
             if not server and any(line.startswith, self._domain):
@@ -121,13 +121,13 @@ class Internet(callbacks.Plugin):
         except socket.error, e:
             irc.error(str(e))
             return
-        t.write('registrar ')
-        t.write(registrar.split('(')[0].strip())
-        t.write('\n')
+        t.write(b'registrar ')
+        t.write(registrar.split('(')[0].strip().encode('ascii'))
+        t.write(b'\n')
         s = t.read_all()
         url = ''
         for line in s.splitlines():
-            line = line.strip()
+            line = line.decode('ascii').strip()
             if not line:
                 continue
             if line.startswith('Email'):
@@ -199,7 +199,7 @@ class SqliteKarmaDB(object):

     def load(self, channel, filename):
         filename = conf.supybot.directories.data.dirize(filename)
-        fd = file(filename)
+        fd = open(filename)
         reader = csv.reader(fd)
         db = self._getDb(channel)
         cursor = db.cursor()
@@ -68,7 +68,7 @@ class Later(callbacks.Plugin):

     def _openNotes(self):
         try:
-            fd = file(self.filename)
+            fd = open(self.filename)
         except EnvironmentError, e:
             self.log.warning('Couldn\'t open %s: %s', self.filename, e)
             return
@@ -1177,6 +1177,12 @@ class Unit:
     def __cmp__(self, other):
         return cmp(self.name, other.name)

+    def __lt__(self, other):
+        return self.name < other.name
+
+    def __eq__(self, other):
+        return self.name == other.name
+
 ############################################################################
 # Wrapper functionality
 #
@@ -78,7 +78,7 @@ class Math(callbacks.Plugin):
         while number != 0:
             digit = number % base
             if digit >= 10:
-                digit = string.uppercase[digit - 10]
+                digit = string.ascii_uppercase[digit - 10]
             else:
                 digit = str(digit)
             digits.append(digit)
@@ -148,6 +148,8 @@ class Math(callbacks.Plugin):
         else:
             return '%s%s' % (realS, imagS)

+    _calc_match_forbidden_chars = re.compile('[_[\]]')
+    _calc_remover = utils.str.MultipleRemover('_[] \t')
     ###
     # So this is how the 'calc' command works:
     # First, we make a nice little safe environment for evaluation; basically,
@@ -167,12 +169,12 @@ class Math(callbacks.Plugin):
         crash to the bot with something like '10**10**10**10'. One consequence
         is that large values such as '10**24' might not be exact.
         """
-        if text != text.translate(utils.str.chars, '_[]'):
+        if self._calc_match_forbidden_chars.match(text):
             irc.error(_('There\'s really no reason why you should have '
                         'underscores or brackets in your mathematical '
                         'expression. Please remove them.'))
             return
-        #text = text.translate(utils.str.chars, '_[] \t')
+        text = self._calc_remover(text)
         if 'lambda' in text:
             irc.error(_('You can\'t use lambda in this command.'))
             return
@@ -221,14 +223,14 @@ class Math(callbacks.Plugin):
         math, and can thus cause the bot to suck up CPU. Hence it requires
         the 'trusted' capability to use.
         """
-        if text != text.translate(utils.str.chars, '_[]'):
+        if self._calc_match_forbidden_chars.match(text):
             irc.error(_('There\'s really no reason why you should have '
                         'underscores or brackets in your mathematical '
                         'expression. Please remove them.'))
             return
         # This removes spaces, too, but we'll leave the removal of _[] for
         # safety's sake.
-        text = text.translate(utils.str.chars, '_[] \t')
+        text = self._calc_remover(text)
         if 'lambda' in text:
             irc.error(_('You can\'t use lambda in this command.'))
             return
@@ -34,6 +34,7 @@ import imp
 import sys
+import json
 import time
 from itertools import ifilter

 import supybot

@@ -47,7 +48,6 @@ import supybot.ircutils as ircutils
 import supybot.callbacks as callbacks
 from supybot import commands

-from supybot.utils.iter import ifilter
 from supybot.i18n import PluginInternationalization, internationalizeDocstring
 _ = PluginInternationalization('Misc')

@@ -296,7 +296,8 @@ class Misc(callbacks.Plugin):
                     'commits/%s'
         versions = {}
         for branch in ('master', 'testing'):
-            data = json.load(utils.web.getUrlFd(newestUrl % branch))
+            data = json.loads(utils.web.getUrl(newestUrl % branch)
+                              .decode('utf8'))
             version = data['commit']['committer']['date']
             # Strip the last ':':
             version = ''.join(version.rsplit(':', 1))
@@ -29,7 +29,6 @@

 import os
 import time
 import shlex
-import string

 from cStringIO import StringIO
@@ -37,6 +36,7 @@ from cStringIO import StringIO
 import supybot.conf as conf
 import supybot.ircdb as ircdb
 import supybot.utils as utils
+import supybot.shlex as shlex
 from supybot.commands import *
 import supybot.plugins as plugins
 import supybot.ircutils as ircutils
@@ -44,9 +44,8 @@ import supybot.callbacks as callbacks
 from supybot.i18n import PluginInternationalization, internationalizeDocstring
 _ = PluginInternationalization('MoobotFactoids')

-allchars = string.maketrans('', '')
 class OptionList(object):
-    validChars = allchars.translate(allchars, '|()')
+    separators = '|()'
     def _insideParens(self, lexer):
         ret = []
         while True:
@@ -73,7 +72,7 @@ class OptionList(object):
         lexer.commenters = ''
         lexer.quotes = ''
         lexer.whitespace = ''
-        lexer.wordchars = self.validChars
+        lexer.separators += self.separators
         ret = []
         while True:
             token = lexer.get_token()
@@ -37,7 +37,7 @@ try:
 except ImportError:
     sqlite = None

-MF = plugin.loadPluginModule('MoobotFactoids')
+import plugin
 MFconf = conf.supybot.plugins.MoobotFactoids

 class OptionListTestCase(SupyTestCase):
@@ -47,7 +47,7 @@ class OptionListTestCase(SupyTestCase):
         original = L[:]
         while max and L:
             max -= 1
-            option = MF.plugin.pickOptions(s)
+            option = plugin.pickOptions(s)
             self.failUnless(option in original,
                             'Option %s not in %s' % (option, original))
             if option in L:
@@ -45,12 +45,15 @@
 #                                                                             #
 ###

+from __future__ import division
+
 import supybot

 import re
 import math
 import string

+import supybot.utils as utils
 import supybot.callbacks as callbacks
 from supybot.commands import wrap, additional
 from supybot.i18n import PluginInternationalization, internationalizeDocstring
@@ -116,11 +119,12 @@ class Nickometer(callbacks.Plugin):
                        ('\\[rkx]0', 1000),
                        ('\\0[rkx]', 1000)]

-        letterNumberTranslator = string.maketrans('023457+8', 'ozeasttb')
+        letterNumberTranslator = utils.str.MultipleReplacer(dict(zip(
+            '023457+8', 'ozeasttb')))
         for special in specialCost:
             tempNick = nick
             if special[0][0] != '\\':
-                tempNick = tempNick.translate(letterNumberTranslator)
+                tempNick = letterNumberTranslator(tempNick)

             if tempNick and re.search(special[0], tempNick, re.IGNORECASE):
                 score += self.punish(special[1], 'matched special case /%s/' %
@@ -218,7 +222,7 @@ class Nickometer(callbacks.Plugin):

         # Use an appropriate function to map [0, +inf) to [0, 100)
         percentage = 100 * (1 + math.tanh((score - 400.0) / 400.0)) * \
-                     (1 - 1 / (1 + score / 5.0)) / 2
+                     (1 - 1 / (1 + score / 5.0)) // 2

         # if it's above 99.9%, show as many digits as is interesting
         score_string=re.sub('(99\\.9*\\d|\\.\\d).*','\\1',`percentage`)
@@ -459,7 +459,7 @@ class Owner(callbacks.Plugin):
                 x = module.reload()
             try:
                 module = plugin.loadPluginModule(name)
-                if hasattr(module, 'reload'):
+                if hasattr(module, 'reload') and 'x' in locals():
                     module.reload(x)
                 for callback in callbacks:
                     callback.die()
@@ -29,6 +29,8 @@
 ###

 import os
+import io
+import sys
 import json
 import shutil
 import urllib
@@ -36,6 +38,7 @@ import urllib2
 import tarfile
 from cStringIO import StringIO

+BytesIO = StringIO if sys.version_info[0] < 3 else io.BytesIO

 import supybot.log as log
 import supybot.conf as conf
@@ -79,7 +82,7 @@ class GithubRepository(GitRepository):
         args = dict([(x,y) for x,y in args.items() if y is not None])
         url = '%s/%s/%s?%s' % (self._apiUrl, type_, uri_end,
                                urllib.urlencode(args))
-        return json.load(utils.web.getUrlFd(url))
+        return json.loads(utils.web.getUrl(url).decode('utf8'))

     def getPluginList(self):
         plugins = self._query(
@@ -101,14 +104,17 @@ class GithubRepository(GitRepository):

     def _download(self, plugin):
         try:
-            fileObject = urllib2.urlopen(self._downloadUrl)
-            fileObject2 = StringIO()
-            fileObject2.write(fileObject.read())
-            fileObject.close()
-            fileObject2.seek(0)
-            return tarfile.open(fileobj=fileObject2, mode='r:gz')
-        finally:
-            del fileObject
+            response = utils.web.getUrlFd(self._downloadUrl)
+            if sys.version_info[0] < 3:
+                assert response.getcode() == 200, response.getcode()
+            else:
+                assert response.status == 200, response.status
+            fileObject = BytesIO()
+            fileObject.write(response.read())
+        finally: # urllib does not handle 'with' statements :(
+            response.close()
+        fileObject.seek(0)
+        return tarfile.open(fileobj=fileObject, mode='r:gz')
     def install(self, plugin):
         archive = self._download(plugin)
         prefix = archive.getnames()[0]
@@ -132,7 +138,7 @@ class GithubRepository(GitRepository):
                 if extractedFile is None:
                     os.mkdir(newFileName)
                 else:
-                    open(newFileName, 'a').write(extractedFile.read())
+                    open(newFileName, 'ab').write(extractedFile.read())
         finally:
             archive.close()
             del archive
@@ -28,10 +28,9 @@
 # POSSIBILITY OF SUCH DAMAGE.
 ###

-import new
 import time
+import types
 import socket
-import sgmllib
 import threading

 import supybot.conf as conf
@@ -45,7 +44,8 @@ from supybot.i18n import PluginInternationalization, internationalizeDocstring
 _ = PluginInternationalization('RSS')

 try:
-    feedparser = utils.python.universalImport('feedparser', 'local.feedparser')
+    feedparser = utils.python.universalImport('feedparser.feedparser',
+        'local.feedparser.feedparser', 'feedparser', 'local.feedparser')
 except ImportError:
     raise callbacks.Error, \
             'You the feedparser module installed to use this plugin. ' \
@@ -261,7 +261,7 @@ class RSS(callbacks.Plugin):
             results = feedparser.parse(url)
             if 'bozo_exception' in results:
                 raise results['bozo_exception']
-        except sgmllib.SGMLParseError:
+        except feedparser.sgmllib.SGMLParseError:
             self.log.exception('Uncaught exception from feedparser:')
             raise callbacks.Error, 'Invalid (unparsable) RSS feed.'
         except socket.timeout:
@@ -349,7 +349,7 @@ class RSS(callbacks.Plugin):
             args.insert(0, url)
             self.rss(irc, msg, args)
         f = utils.python.changeFunctionName(f, name, docstring)
-        f = new.instancemethod(f, self, RSS)
+        f = types.MethodType(f, self)
         self.feedNames[name] = (url, f)
         self._registerFeed(name, url)
@@ -30,7 +30,6 @@

 import re
 import json
-import httplib2

 import supybot.conf as conf
 import supybot.utils as utils
@@ -158,6 +157,7 @@ class ShrinkUrl(callbacks.PluginRegexp):
             return self.db.get('ln', url)
         except KeyError:
             text = utils.web.getUrl('http://ln-s.net/home/api.jsp?url=' + url)
+            text = text.decode()
             (code, text) = text.split(None, 1)
             text = text.strip()
             if code == '200':
@@ -186,6 +186,7 @@ class ShrinkUrl(callbacks.PluginRegexp):
             return self.db.get('tiny', url)
         except KeyError:
             text = utils.web.getUrl('http://tinyurl.com/api-create.php?url=' + url)
+            text = text.decode()
             if text.startswith('Error'):
                 raise ShrinkError, text[5:]
             self.db.set('tiny', url, text)
@@ -213,7 +214,7 @@ class ShrinkUrl(callbacks.PluginRegexp):
             return self.db.get('xrl', quotedurl)
         except KeyError:
             data = utils.web.urlencode({'long_url': url})
-            text = utils.web.getUrl(self._xrlApi, data=data)
+            text = utils.web.getUrl(self._xrlApi, data=data).decode()
             if text.startswith('ERROR:'):
                 raise ShrinkError, text[6:]
             self.db.set('xrl', quotedurl, text)
@@ -240,11 +241,10 @@ class ShrinkUrl(callbacks.PluginRegexp):
         try:
             return self.db.get('goo', url)
         except KeyError:
-            text = httplib2.Http().request(self._gooApi,
-                                           'POST',
+            text = utils.web.getUrl(self._gooApi,
                                     headers={'content-type':'application/json'},
-                                           body=json.dumps({'longUrl': url}))[1]
-            googl = json.loads(text)['id']
+                                    data=json.dumps({'longUrl': url}).encode())
+            googl = json.loads(text.decode())['id']
             if len(googl) > 0 :
                 self.db.set('goo', url, googl)
                 return googl
@@ -270,7 +270,7 @@ class ShrinkUrl(callbacks.PluginRegexp):
         try:
             return self.db.get('x0', url)
         except KeyError:
-            text = utils.web.getUrl(self._x0Api % url)
+            text = utils.web.getUrl(self._x0Api % url).decode()
             if text.startswith('ERROR:'):
                 raise ShrinkError, text[6:]
             self.db.set('x0', url, text)
@@ -153,7 +153,7 @@ class Status(callbacks.Plugin):
         cmd = 'ps -o rss -p %s' % pid
         try:
             inst = subprocess.Popen(cmd.split(), close_fds=True,
-                                    stdin=file(os.devnull),
+                                    stdin=open(os.devnull),
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
         except OSError:
@@ -28,7 +28,10 @@
 # POSSIBILITY OF SUCH DAMAGE.
 ###

+import sys
 import types
+import codecs
 import base64
+import binascii

 import supybot.utils as utils
@@ -72,10 +75,31 @@ class String(callbacks.Plugin):
         available in the documentation of the Python codecs module:
         <http://docs.python.org/library/codecs.html#standard-encodings>.
         """
+        # Binary codecs are prefixed with _codec in Python 3
+        if encoding in 'base64 bz2 hex quopri uu zlib':
+            encoding += '_codec'
+        if encoding.endswith('_codec'):
+            text = text.encode()
+
+        # Do the encoding
         try:
-            irc.reply(text.encode(encoding).rstrip('\n'))
+            encoder = codecs.getencoder(encoding)
         except LookupError:
             irc.errorInvalid(_('encoding'), encoding)
+        text = encoder(text)[0]
+
+        # If this is a binary codec, re-encode it with base64
+        if encoding.endswith('_codec') and encoding != 'base64_codec':
+            text = codecs.getencoder('base64_codec')(text)[0].decode()
+
+        # Change result into a string
+        if sys.version_info[0] < 3 and isinstance(text, unicode):
+            text = text.encode('utf-8')
+        elif sys.version_info[0] >= 3 and isinstance(text, bytes):
+            text = text.decode()
+
+        # Reply
+        irc.reply(text.rstrip('\n'))
     encode = wrap(encode, ['something', 'text'])

     @internationalizeDocstring
@@ -86,19 +110,37 @@ class String(callbacks.Plugin):
         available in the documentation of the Python codecs module:
         <http://docs.python.org/library/codecs.html#standard-encodings>.
         """
+        # Binary codecs are prefixed with _codec in Python 3
+        if encoding in 'base64 bz2 hex quopri uu zlib':
+            encoding += '_codec'
+
+        # If this is a binary codec, pre-decode it with base64
+        if encoding.endswith('_codec') and encoding != 'base64_codec':
+            text = codecs.getdecoder('base64_codec')(text.encode())[0]
+
+        # Do the decoding
         try:
-            s = text.decode(encoding)
-            # Not all encodings decode to a unicode object. Only encode those
-            # that do.
-            if isinstance(s, unicode):
-                s = s.encode('utf-8')
-            irc.reply(s)
+            decoder = codecs.getdecoder(encoding)
         except LookupError:
             irc.errorInvalid(_('encoding'), encoding)
+        if sys.version_info[0] >= 3 and not isinstance(text, bytes):
+            text = text.encode()
+        try:
+            text = decoder(text)[0]
+        except binascii.Error:
+            irc.errorInvalid(_('base64 string'),
+                    s=_('Base64 strings must be a multiple of 4 in '
+                        'length, padded with \'=\' if necessary.'))
+            return
+
+        # Change result into a string
+        if sys.version_info[0] < 3 and isinstance(text, unicode):
+            text = text.encode('utf-8')
+        elif sys.version_info[0] >= 3 and isinstance(text, bytes):
+            text = text.decode()
+
+        # Reply
+        irc.reply(text)
     decode = wrap(decode, ['something', 'text'])

     @internationalizeDocstring
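The rewritten `encode`/`decode` commands above route binary codecs through `codecs.getencoder`/`codecs.getdecoder` and wrap the result in base64 so it stays printable. A standalone Python 3 sketch of that same pipeline (it mirrors, not reproduces, the plugin code):

# zlib-compress a string, then base64 it, exactly like `encode zlib <text>`.
import codecs

text = 'Hello, world!'
data = codecs.getencoder('zlib_codec')(text.encode())[0]     # bytes in, bytes out
printable = codecs.getencoder('base64_codec')(data)[0].decode().rstrip('\n')
print(printable)

# And the reverse, mirroring `decode zlib <text>`:
data = codecs.getdecoder('base64_codec')(printable.encode())[0]
print(codecs.getdecoder('zlib_codec')(data)[0].decode())      # -> Hello, world!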
@@ -48,6 +48,7 @@ import supybot.callbacks as callbacks
 from supybot.i18n import PluginInternationalization, internationalizeDocstring
 _ = PluginInternationalization('Unix')

+_progstats_endline_remover = utils.str.MultipleRemover('\r\n')
 def progstats():
     pw = pwd.getpwuid(os.getuid())
     response = format('Process ID %i running as user %q and as group %q '
@@ -55,7 +56,7 @@ def progstats():
                       'Running on Python %s.',
                       os.getpid(), pw[0], pw[3],
                       os.getcwd(), ' '.join(sys.argv),
-                      sys.version.translate(utils.str.chars, '\r\n'))
+                      _progstats_endline_remover(sys.version))
     return response

 class TimeoutError(IOError):
@@ -108,7 +109,7 @@ class Unix(callbacks.Plugin):
         irc.reply(format('%i', os.getpid()), private=True)
     pid = wrap(pid, [('checkCapability', 'owner')])

-    _cryptre = re.compile(r'[./0-9A-Za-z]')
+    _cryptre = re.compile(b'[./0-9A-Za-z]')
    @internationalizeDocstring
    def crypt(self, irc, msg, args, password, salt):
        """<password> [<salt>]
@@ -119,12 +120,12 @@ class Unix(callbacks.Plugin):
         based crypt rather than the standard DES based crypt.
         """
         def makeSalt():
-            s = '\x00'
-            while self._cryptre.sub('', s) != '':
+            s = b'\x00'
+            while self._cryptre.sub(b'', s) != b'':
                 s = struct.pack('<h', random.randrange(-(2**15), 2**15))
             return s
         if not salt:
-            salt = makeSalt()
+            salt = makeSalt().decode()
         irc.reply(crypt.crypt(password, salt))
     crypt = wrap(crypt, ['something', additional('something')])

@@ -156,15 +157,15 @@ class Unix(callbacks.Plugin):
             irc.error(e, Raise=True)
         ret = inst.poll()
         if ret is not None:
-            s = inst.stderr.readline()
+            s = inst.stderr.readline().decode('utf8')
             if not s:
-                s = inst.stdout.readline()
+                s = inst.stdout.readline().decode('utf8')
             s = s.rstrip('\r\n')
             s = s.lstrip('Error: ')
             irc.error(s, Raise=True)
-        (out, err) = inst.communicate(word)
+        (out, err) = inst.communicate(word.encode())
         inst.wait()
-        lines = filter(None, out.splitlines())
+        lines = [x.decode('utf8') for x in out.splitlines() if x]
         lines.pop(0) # Banner
         if not lines:
             irc.error(_('No results found.'), Raise=True)
@@ -212,7 +213,7 @@ class Unix(callbacks.Plugin):
             inst = subprocess.Popen(args, close_fds=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
-                                    stdin=file(os.devnull))
+                                    stdin=open(os.devnull))
         except OSError, e:
             irc.error(_('It seems the configured fortune command was '
                         'not available.'), Raise=True)
@@ -242,15 +243,15 @@ class Unix(callbacks.Plugin):
         try:
             inst = subprocess.Popen([wtfCmd, something], close_fds=True,
                                     stdout=subprocess.PIPE,
-                                    stderr=file(os.devnull),
-                                    stdin=file(os.devnull))
+                                    stderr=open(os.devnull),
+                                    stdin=open(os.devnull))
         except OSError:
             irc.error(_('It seems the configured wtf command was not '
                         'available.'), Raise=True)
         (out, _) = inst.communicate()
         inst.wait()
         if out:
-            response = out.splitlines()[0].strip()
+            response = out.decode('utf8').splitlines()[0].strip()
             response = utils.str.normalizeWhitespace(response)
             irc.reply(response)
         else:
@@ -292,15 +293,15 @@ class Unix(callbacks.Plugin):
         try:
             inst = subprocess.Popen(args, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
-                                    stdin=file(os.devnull))
+                                    stdin=open(os.devnull))
         except OSError, e:
             irc.error('It seems the configured ping command was '
                       'not available (%s).' % e, Raise=True)
         result = inst.communicate()
         if result[1]: # stderr
-            irc.error(' '.join(result[1].split()))
+            irc.error(' '.join(result[1].decode('utf8').split()))
         else:
-            response = result[0].split("\n");
+            response = result[0].decode('utf8').split("\n");
             if response[1]:
                 irc.reply(' '.join(response[1].split()[3:5]).split(':')[0]
                           + ': ' + ' '.join(response[-3:]))
@@ -325,14 +326,14 @@ class Unix(callbacks.Plugin):
             inst = subprocess.Popen(args, close_fds=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
-                                    stdin=file(os.devnull))
+                                    stdin=open(os.devnull))
         except OSError, e:
             irc.error('It seems the configured uptime command was '
                       'not available.', Raise=True)
         (out, err) = inst.communicate()
         inst.wait()
         lines = out.splitlines()
-        lines = map(str.rstrip, lines)
+        lines = [x.decode('utf8').rstrip() for x in lines]
         lines = filter(None, lines)
         irc.replies(lines, joiner=' ')
     else:
@@ -353,14 +354,14 @@ class Unix(callbacks.Plugin):
             inst = subprocess.Popen(args, close_fds=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
-                                    stdin=file(os.devnull))
+                                    stdin=open(os.devnull))
         except OSError, e:
             irc.error('It seems the configured uptime command was '
                       'not available.', Raise=True)
         (out, err) = inst.communicate()
         inst.wait()
         lines = out.splitlines()
-        lines = map(str.rstrip, lines)
+        lines = [x.decode('utf8').rstrip() for x in lines]
         lines = filter(None, lines)
         irc.replies(lines, joiner=' ')
     else:
@@ -382,15 +383,15 @@ class Unix(callbacks.Plugin):
         try:
             inst = subprocess.Popen(args, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
-                                    stdin=file(os.devnull))
+                                    stdin=open(os.devnull))
         except OSError, e:
             irc.error('It seems the requested command was '
                       'not available (%s).' % e, Raise=True)
         result = inst.communicate()
         if result[1]: # stderr
-            irc.error(' '.join(result[1].split()))
+            irc.error(' '.join(result[1].decode('utf8').split()))
         if result[0]: # stdout
-            response = result[0].split("\n");
+            response = result[0].decode('utf8').split("\n");
             response = [l for l in response if l]
             irc.replies(response)
     call = thread(wrap(call, ["owner", "text"]))
@@ -91,7 +91,8 @@ class Web(callbacks.PluginRegexp):
             return
         try:
             size = conf.supybot.protocols.http.peekSize()
-            text = utils.web.getUrl(url, size=size)
+            text = utils.web.getUrl(url, size=size) \
+                        .decode('utf8', errors='replace')
         except utils.web.Error, e:
             self.log.info('Couldn\'t snarf title of %u: %s.', url, e)
             return
@@ -134,7 +135,8 @@ class Web(callbacks.PluginRegexp):
         course.
         """
         size = conf.supybot.protocols.http.peekSize()
-        s = utils.web.getUrl(url, size=size)
+        s = utils.web.getUrl(url, size=size) \
+                .decode('utf8', errors='replace')
         m = self._doctypeRe.search(s)
         if m:
             s = utils.str.normalizeWhitespace(m.group(0))
@@ -175,7 +177,8 @@ class Web(callbacks.PluginRegexp):
         Returns the HTML <title>...</title> of a URL.
         """
         size = conf.supybot.protocols.http.peekSize()
-        text = utils.web.getUrl(url, size=size)
+        text = utils.web.getUrl(url, size=size) \
+                    .decode('utf8', errors='replace')
         parser = Title()
         try:
             parser.feed(text)
@@ -201,7 +204,8 @@ class Web(callbacks.PluginRegexp):
         webserver is running on the host given.
         """
         url = 'http://uptime.netcraft.com/up/graph/?host=' + hostname
-        html = utils.web.getUrl(url)
+        html = utils.web.getUrl(url) \
+                    .decode('utf8', errors='replace')
         m = self._netcraftre.search(html)
         if m:
             html = m.group(1)
@@ -246,7 +250,8 @@ class Web(callbacks.PluginRegexp):
             irc.error(_('This command is disabled '
                         '(supybot.plugins.Web.fetch.maximum is set to 0).'),
                       Raise=True)
-        fd = utils.web.getUrlFd(url)
+        fd = utils.web.getUrlFd(url) \
+                    .decode('utf8', errors='replace')
         irc.reply(fd.read(max))
     fetch = wrap(fetch, ['url'])
@@ -38,8 +38,8 @@ import time
 import random
 import fnmatch
 import os.path
-import UserDict
 import threading
+import collections

 import supybot.log as log
 import supybot.dbi as dbi
@@ -230,7 +230,7 @@ class DbiChannelDB(object):
         return _getDbAndDispatcher


-class ChannelUserDictionary(UserDict.DictMixin):
+class ChannelUserDictionary(collections.MutableMapping):
     IdDict = dict
     def __init__(self):
         self.channels = ircutils.IrcDict()
@@ -246,6 +246,15 @@ class ChannelUserDictionary(UserDict.DictMixin):
     def __delitem__(self, (channel, id)):
         del self.channels[channel][id]

+    def __iter__(self):
+        for channel, ids in self.channels.items():
+            for id_, value in ids.items():
+                yield (channel, id_)
+        raise StopIteration()
+
+    def __len__(self):
+        return sum([len(x) for x in self.channels])
+
     def iteritems(self):
         for (channel, ids) in self.channels.iteritems():
             for (id, v) in ids.iteritems():
@@ -267,7 +276,7 @@ class ChannelUserDB(ChannelUserDictionary):
         ChannelUserDictionary.__init__(self)
         self.filename = filename
         try:
-            fd = file(self.filename)
+            fd = open(self.filename)
         except EnvironmentError, e:
             log.warning('Couldn\'t open %s: %s.', self.filename, e)
             return
@@ -304,7 +313,12 @@ class ChannelUserDB(ChannelUserDictionary):
                                   self.__class__.__name__)
                 fd.rollback()
                 return
-        items.sort()
+        try:
+            items.sort()
+        except TypeError:
+            # FIXME: Implement an algorithm that can order dictionnaries
+            # with both strings and integers as keys.
+            pass
         for ((channel, id), v) in items:
             L = self.serialize(v)
             L.insert(0, id)
@@ -564,7 +578,7 @@ class PeriodicFileDownloader(object):
             return
         confDir = conf.supybot.directories.data()
         newFilename = os.path.join(confDir, utils.file.mktemp())
-        outfd = file(newFilename, 'wb')
+        outfd = open(newFilename, 'wb')
         start = time.time()
         s = infd.read(4096)
         while s:
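The `ChannelUserDictionary` change above moves from `UserDict.DictMixin` to `collections.MutableMapping`, which derives the full mapping API (keys, items, get, update, and so on) from five required methods. A minimal self-contained illustration of that contract (not supybot code):

# Python 3.3+ spells it collections.abc.MutableMapping; fall back for older versions.
try:
    from collections.abc import MutableMapping
except ImportError:
    from collections import MutableMapping

class ChannelDict(MutableMapping):
    """Tiny mapping keyed by (channel, id) tuples, like the class in the diff."""
    def __init__(self):
        self._data = {}
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)

d = ChannelDict()
d[('#chan', 1)] = 'value'
assert list(d.items()) == [(('#chan', 1), 'value')]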
sandbox/run_2to3.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
rm -f src/version.py # Prevent 2to3 from copying it, since py3k/src/version.py was probably written by root.
cp locale/ py3k/ -R
cp plugins/ py3k/ -R # copy plugins data
python 2to3/run.py src/ plugins/ test/ scripts/* setup.py -wWno py3k -f all -f def_iteritems -f def_itervalues -f def_iterkeys -f reload "$@"
@@ -184,7 +184,7 @@ if __name__ == '__main__':
     i18n.getLocaleFromRegistryFilename(registryFilename)
     try:
         # The registry *MUST* be opened before importing log or conf.
-        registry.open(registryFilename)
+        registry.open_registry(registryFilename)
         shutil.copy(registryFilename, registryFilename + '.bak')
     except registry.InvalidRegistryFile, e:
         s = '%s in %s. Please fix this error and start supybot again.' % \
@@ -290,7 +290,7 @@ if __name__ == '__main__':
     pidFile = conf.supybot.pidFile()
     if pidFile:
         try:
-            fd = file(pidFile, 'w')
+            fd = open(pidFile, 'w')
             pid = os.getpid()
             fd.write('%s%s' % (pid, os.linesep))
             fd.close()
@@ -319,10 +319,10 @@ if __name__ == '__main__':
                                         'userdata.conf')
     # Let's open this now since we've got our directories setup.
     if not os.path.exists(userdataFilename):
-        fd = file(userdataFilename, 'w')
+        fd = open(userdataFilename, 'w')
         fd.write('\n')
         fd.close()
-    registry.open(userdataFilename)
+    registry.open_registry(userdataFilename)

     import supybot.irclib as irclib
     import supybot.ircmsgs as ircmsgs
@@ -33,7 +33,7 @@
 VERBOSE = False

 def readPid(filename):
-    fd = file(filename)
+    fd = open(filename)
     try:
         return int(fd.read().strip())
     finally:
@@ -266,7 +266,7 @@ def main():
         os.mkdir(pathname)

     def writeFile(filename, s):
-        fd = file(os.path.join(pathname, filename), 'w')
+        fd = open(os.path.join(pathname, filename), 'w')
         try:
             fd.write(s)
         finally:
@@ -46,7 +46,7 @@ if not os.path.exists('doc-conf'):

 registryFilename = os.path.join('doc-conf', 'doc.conf')
 try:
-    fd = file(registryFilename, 'w')
+    fd = open(registryFilename, 'w')
     fd.write("""
 supybot.directories.data: doc-data
 supybot.directories.conf: doc-conf
@@ -62,7 +62,7 @@ except EnvironmentError, e:
     error('Unable to open %s for writing.' % registryFilename)

 import supybot.registry as registry
-registry.open(registryFilename)
+registry.open_registry(registryFilename)

 import supybot.log as log
 import supybot.conf as conf
@@ -228,7 +228,7 @@ def genDoc(m, options):
     path = os.path.join(options.outputDir, '%s.%s' % (Plugin.name,
                                                       options.format))
     try:
-        fd = file(path, 'w')
+        fd = open(path, 'w')
     except EnvironmentError, e:
         error('Unable to open %s for writing.' % path)
     f = getattr(Plugin, 'render%s' % options.format.upper(), None)
@@ -43,7 +43,7 @@ if not os.path.exists('test-conf'):
     os.mkdir('test-conf')

 registryFilename = os.path.join('test-conf', 'test.conf')
-fd = file(registryFilename, 'w')
+fd = open(registryFilename, 'w')
 fd.write("""
 supybot.directories.data: test-data
 supybot.directories.conf: test-conf
@@ -62,7 +62,7 @@ supybot.databases.users.allowUnregistration: True
 fd.close()

 import supybot.registry as registry
-registry.open(registryFilename)
+registry.open_registry(registryFilename)

 import supybot.log as log
 import supybot.conf as conf
@@ -159,7 +159,7 @@ if __name__ == '__main__':

     if options.trace:
         traceFilename = conf.supybot.directories.log.dirize('trace.log')
-        fd = file(traceFilename, 'w')
+        fd = open(traceFilename, 'w')
         sys.settrace(utils.gen.callTracer(fd))
         atexit.register(fd.close)
         atexit.register(lambda : sys.settrace(None))
@@ -35,6 +35,7 @@ This module contains the basic callbacks for handling PRIVMSGs.
 import supybot

 import re
+import sys
 import copy
 import time
 import shlex
@@ -151,14 +152,16 @@ def canonicalName(command):
     Currently, this makes everything lowercase and removes all dashes and
     underscores.
     """
-    if isinstance(command, unicode):
+    if sys.version_info[0] < 3 and isinstance(command, unicode):
         command = command.encode('utf-8')
+    elif sys.version_info[0] >= 3 and isinstance(command, bytes):
+        command = command.decode()
     special = '\t -_'
     reAppend = ''
     while command and command[-1] in special:
         reAppend = command[-1] + reAppend
         command = command[:-1]
-    return command.translate(utils.str.chars, special).lower() + reAppend
+    return ''.join([x for x in command if x not in special]).lower() + reAppend

 def reply(msg, s, prefixNick=None, private=None,
           notice=None, to=None, action=None, error=False):
@@ -262,10 +265,10 @@ class Tokenizer(object):
     #
     # These are the characters valid in a token. Everything printable except
     # double-quote, left-bracket, and right-bracket.
-    validChars = utils.str.chars.translate(utils.str.chars, '\x00\r\n \t')
+    separators = '\x00\r\n \t'
     def __init__(self, brackets='', pipe=False, quotes='"'):
         if brackets:
-            self.validChars=self.validChars.translate(utils.str.chars, brackets)
+            self.separators += brackets
             self.left = brackets[0]
             self.right = brackets[1]
         else:
@@ -273,15 +276,16 @@ class Tokenizer(object):
             self.right = ''
         self.pipe = pipe
         if self.pipe:
-            self.validChars = self.validChars.translate(utils.str.chars, '|')
+            self.separators += '|'
         self.quotes = quotes
-        self.validChars = self.validChars.translate(utils.str.chars, quotes)
+        self.separators += quotes


     def _handleToken(self, token):
         if token[0] == token[-1] and token[0] in self.quotes:
             token = token[1:-1]
-            token = token.decode('string_escape')
+            encoding_prefix = 'string' if sys.version_info[0]<3 else 'unicode'
+            token = token.encode().decode(encoding_prefix + '_escape')
         return token

     def _insideBrackets(self, lexer):
@@ -307,7 +311,7 @@ class Tokenizer(object):
         lexer = shlex.shlex(StringIO(s))
         lexer.commenters = ''
         lexer.quotes = self.quotes
-        lexer.wordchars = self.validChars
+        lexer.separators = self.separators
         args = []
         ends = []
         while True:
src/cdb.py
@@ -32,6 +32,8 @@ Database module, similar to dbhash. Uses a format similar to (if not entirely
 the same as) DJB's CDB <http://cr.yp.to/cdb.html>.
 """

+from __future__ import division
+
 import os
 import sys
 import struct
@@ -60,7 +62,7 @@ def dump(map, fd=sys.stdout):
     for (key, value) in map.iteritems():
         fd.write('+%s,%s:%s->%s\n' % (len(key), len(value), key, value))

-def open(filename, mode='r', **kwargs):
+def open_db(filename, mode='r', **kwargs):
     """Opens a database; used for compatibility with other database modules."""
     if mode == 'r':
         return Reader(filename, **kwargs)
@@ -114,7 +116,7 @@ def make(dbFilename, readFilename=None):
     if readFilename is None:
         readfd = sys.stdin
     else:
-        readfd = file(readFilename, 'r')
+        readfd = open(readFilename, 'rb')
     maker = Maker(dbFilename)
     while 1:
         (initchar, key, value) = _readKeyValue(readfd)
@@ -129,7 +131,7 @@ def make(dbFilename, readFilename=None):
 class Maker(object):
     """Class for making CDB databases."""
     def __init__(self, filename):
-        self.fd = utils.file.AtomicFile(filename)
+        self.fd = utils.file.AtomicFile(filename, 'wb')
         self.filename = filename
         self.fd.seek(2048)
         self.hashPointers = [(0, 0)] * 256
@@ -144,8 +146,8 @@ class Maker(object):
         hashPointer = h % 256
         startPosition = self.fd.tell()
         self.fd.write(pack2Ints(len(key), len(data)))
-        self.fd.write(key)
-        self.fd.write(data)
+        self.fd.write(key.encode())
+        self.fd.write(data.encode())
         self.hashes[hashPointer].append((h, startPosition))

     def finish(self):
@@ -164,7 +166,7 @@ class Maker(object):
         hashLen = len(hash) * 2
         a = [(0, 0)] * hashLen
         for (h, pos) in hash:
-            i = (h / 256) % hashLen
+            i = (h // 256) % hashLen
             while a[i] != (0, 0):
                 i = (i + 1) % hashLen
             a[i] = (h, pos)
@@ -182,7 +184,7 @@ class Reader(utils.IterableMap):
     """Class for reading from a CDB database."""
     def __init__(self, filename):
         self.filename = filename
-        self.fd = file(filename, 'r')
+        self.fd = open(filename, 'rb')
         self.loop = 0
         self.khash = 0
         self.kpos = 0
@@ -208,7 +210,8 @@ class Reader(utils.IterableMap):
         while self.hslots < self.loop:
             (klen, dlen) = unpack2Ints(self._read(8, self.hslots))
             dpos = self.hslots + 8 + klen
-            ret = (self._read(klen, self.hslots+8), self._read(dlen, dpos))
+            ret = (self._read(klen, self.hslots+8).decode(),
+                   self._read(dlen, dpos).decode())
             self.hslots = dpos + dlen
             yield ret
         self.loop = 0
@@ -221,7 +224,7 @@ class Reader(utils.IterableMap):
                                          (self.khash * 8) & 2047))
         if not self.hslots:
             return False
-        self.kpos = self.hpos + (((self.khash / 256) % self.hslots) * 8)
+        self.kpos = self.hpos + (((self.khash // 256) % self.hslots) * 8)
         while self.loop < self.hslots:
             (h, p) = unpack2Ints(self._read(8, self.kpos))
             if p == 0:
@@ -243,7 +246,7 @@ class Reader(utils.IterableMap):
         return self._findnext(key)

     def _getCurrentData(self):
-        return self._read(self.dlen, self.dpos)
+        return self._read(self.dlen, self.dpos).decode()

     def find(self, key, loop=0):
         if self._find(key, loop=loop):
@@ -269,7 +272,7 @@ class Reader(utils.IterableMap):
     def __len__(self):
         (start,) = struct.unpack('<i', self._read(4, 0))
|
||||
self.fd.seek(0, 2)
|
||||
return ((self.fd.tell() - start) / 16)
|
||||
return ((self.fd.tell() - start) // 16)
|
||||
|
||||
has_key = _find
|
||||
__contains__ = has_key
|
||||
@ -292,7 +295,7 @@ class ReaderWriter(utils.IterableMap):
|
||||
|
||||
def _openFiles(self):
|
||||
self.cdb = Reader(self.filename)
|
||||
self.journal = file(self.journalName, 'w')
|
||||
self.journal = open(self.journalName, 'w')
|
||||
|
||||
def _closeFiles(self):
|
||||
self.cdb.close()
|
||||
@ -312,7 +315,7 @@ class ReaderWriter(utils.IterableMap):
|
||||
removals = set()
|
||||
adds = {}
|
||||
try:
|
||||
fd = file(self.journalName, 'r')
|
||||
fd = open(self.journalName, 'r')
|
||||
while 1:
|
||||
(initchar, key, value) = _readKeyValue(fd)
|
||||
if initchar is None:
|
||||
@ -457,7 +460,7 @@ class Shelf(ReaderWriter):
|
||||
if __name__ == '__main__':
|
||||
if sys.argv[0] == 'cdbdump':
|
||||
if len(sys.argv) == 2:
|
||||
fd = file(sys.argv[1], 'r')
|
||||
fd = open(sys.argv[1], 'rb')
|
||||
else:
|
||||
fd = sys.stdin
|
||||
db = Reader(fd)
|
||||
|
18
src/conf.py
@ -91,7 +91,7 @@ def registerChannelValue(group, name, value):
|
||||
value.channelValue = True
|
||||
g = group.register(name, value)
|
||||
gname = g._name.lower()
|
||||
for name in registry._cache.iterkeys():
|
||||
for name in registry._cache.keys():
|
||||
if name.lower().startswith(gname) and len(gname) < len(name):
|
||||
name = name[len(gname)+1:] # +1 for .
|
||||
parts = registry.split(name)
|
||||
@ -187,7 +187,7 @@ class ValidChannel(registry.String):
|
||||
def error(self):
|
||||
try:
|
||||
super(ValidChannel, self).error()
|
||||
except registry.InvalidRegistryValue, e:
|
||||
except registry.InvalidRegistryValue as e:
|
||||
e.channel = self.channel
|
||||
raise e
|
||||
|
||||
@ -245,7 +245,7 @@ class Servers(registry.SpaceSeparatedListOfStrings):
|
||||
|
||||
def __call__(self):
|
||||
L = registry.SpaceSeparatedListOfStrings.__call__(self)
|
||||
return map(self.convert, L)
|
||||
return list(map(self.convert, L))
|
||||
|
||||
def __str__(self):
|
||||
return ' '.join(registry.SpaceSeparatedListOfStrings.__call__(self))
|
||||
@ -259,7 +259,7 @@ class SpaceSeparatedSetOfChannels(registry.SpaceSeparatedListOf):
|
||||
List = ircutils.IrcSet
|
||||
Value = ValidChannel
|
||||
def join(self, channel):
|
||||
import ircmsgs # Don't put this globally! It's recursive.
|
||||
from . import ircmsgs # Don't put this globally! It's recursive.
|
||||
key = self.key.get(channel)()
|
||||
if key:
|
||||
return ircmsgs.join(channel, key)
|
||||
@ -304,7 +304,7 @@ def registerNetwork(name, password='', ssl=False, sasl_username='',
|
||||
return network
|
||||
|
||||
# Let's fill our networks.
|
||||
for (name, s) in registry._cache.iteritems():
|
||||
for (name, s) in registry._cache.items():
|
||||
if name.startswith('supybot.networks.'):
|
||||
parts = name.split('.')
|
||||
name = parts[2]
|
||||
@ -460,7 +460,7 @@ registerChannelValue(supybot.reply, 'showSimpleSyntax',
|
||||
class ValidPrefixChars(registry.String):
|
||||
"""Value must contain only ~!@#$%^&*()_-+=[{}]\\|'\";:,<.>/?"""
|
||||
def setValue(self, v):
|
||||
if v.translate(utils.str.chars, '`~!@#$%^&*()_-+=[{}]\\|\'";:,<.>/?'):
|
||||
if any([x not in '`~!@#$%^&*()_-+=[{}]\\|\'";:,<.>/?' for x in v]):
|
||||
self.error()
|
||||
registry.String.setValue(self, v)
|
||||
|
||||
@ -905,7 +905,7 @@ class CDB(registry.Boolean):
|
||||
import supybot.cdb as cdb
|
||||
basename = os.path.basename(filename)
|
||||
journalName = supybot.directories.data.tmp.dirize(basename+'.journal')
|
||||
return cdb.open(filename, 'c',
|
||||
return cdb.open_db(filename, 'c',
|
||||
journalName=journalName,
|
||||
maxmods=self.maximumModifications())
|
||||
|
||||
@ -948,7 +948,7 @@ class Banmask(registry.SpaceSeparatedSetOfStrings):
|
||||
self.__parent = super(Banmask, self)
|
||||
self.__parent.__init__(*args, **kwargs)
|
||||
self.__doc__ = format('Valid values include %L.',
|
||||
map(repr, self.validStrings))
|
||||
list(map(repr, self.validStrings)))
|
||||
|
||||
def help(self):
|
||||
strings = [s for s in self.validStrings if s]
|
||||
@ -964,7 +964,7 @@ class Banmask(registry.SpaceSeparatedSetOfStrings):
|
||||
return self.validStrings[i]
|
||||
|
||||
def setValue(self, v):
|
||||
v = map(self.normalize, v)
|
||||
v = list(map(self.normalize, v))
|
||||
for s in v:
|
||||
if s not in self.validStrings:
|
||||
self.error()
|
||||
|
28
src/dbi.py
@ -97,14 +97,14 @@ class DirMapping(MappingInterface):
|
||||
self._setMax(1)
|
||||
|
||||
def _setMax(self, id):
|
||||
fd = file(os.path.join(self.dirname, 'max'), 'w')
|
||||
fd = open(os.path.join(self.dirname, 'max'), 'w')
|
||||
try:
|
||||
fd.write(str(id))
|
||||
finally:
|
||||
fd.close()
|
||||
|
||||
def _getMax(self):
|
||||
fd = file(os.path.join(self.dirname, 'max'))
|
||||
fd = open(os.path.join(self.dirname, 'max'))
|
||||
try:
|
||||
i = int(fd.read())
|
||||
return i
|
||||
@ -116,7 +116,7 @@ class DirMapping(MappingInterface):
|
||||
|
||||
def get(self, id):
|
||||
try:
|
||||
fd = file(self._makeFilename(id))
|
||||
fd = open(self._makeFilename(id))
|
||||
return fd.read()
|
||||
except EnvironmentError, e:
|
||||
exn = NoRecordError(id)
|
||||
@ -124,13 +124,13 @@ class DirMapping(MappingInterface):
|
||||
raise exn
|
||||
|
||||
def set(self, id, s):
|
||||
fd = file(self._makeFilename(id), 'w')
|
||||
fd = open(self._makeFilename(id), 'w')
|
||||
fd.write(s)
|
||||
fd.close()
|
||||
|
||||
def add(self, s):
|
||||
id = self._getMax()
|
||||
fd = file(self._makeFilename(id), 'w')
|
||||
fd = open(self._makeFilename(id), 'w')
|
||||
try:
|
||||
fd.write(s)
|
||||
return id
|
||||
@ -147,7 +147,7 @@ class FlatfileMapping(MappingInterface):
|
||||
def __init__(self, filename, maxSize=10**6):
|
||||
self.filename = filename
|
||||
try:
|
||||
fd = file(self.filename)
|
||||
fd = open(self.filename)
|
||||
strId = fd.readline().rstrip()
|
||||
self.maxSize = len(strId)
|
||||
try:
|
||||
@ -169,7 +169,7 @@ class FlatfileMapping(MappingInterface):
|
||||
def _incrementCurrentId(self, fd=None):
|
||||
fdWasNone = fd is None
|
||||
if fdWasNone:
|
||||
fd = file(self.filename, 'a')
|
||||
fd = open(self.filename, 'a')
|
||||
fd.seek(0)
|
||||
self.currentId += 1
|
||||
fd.write(self._canonicalId(self.currentId))
|
||||
@ -187,7 +187,7 @@ class FlatfileMapping(MappingInterface):
|
||||
|
||||
def add(self, s):
|
||||
line = self._joinLine(self.currentId, s)
|
||||
fd = file(self.filename, 'r+')
|
||||
fd = open(self.filename, 'r+')
|
||||
try:
|
||||
fd.seek(0, 2) # End.
|
||||
fd.write(line)
|
||||
@ -199,7 +199,7 @@ class FlatfileMapping(MappingInterface):
|
||||
def get(self, id):
|
||||
strId = self._canonicalId(id)
|
||||
try:
|
||||
fd = file(self.filename)
|
||||
fd = open(self.filename)
|
||||
fd.readline() # First line, nextId.
|
||||
for line in fd:
|
||||
(lineId, s) = self._splitLine(line)
|
||||
@ -215,7 +215,7 @@ class FlatfileMapping(MappingInterface):
|
||||
def set(self, id, s):
|
||||
strLine = self._joinLine(id, s)
|
||||
try:
|
||||
fd = file(self.filename, 'r+')
|
||||
fd = open(self.filename, 'r+')
|
||||
self.remove(id, fd)
|
||||
fd.seek(0, 2) # End.
|
||||
fd.write(strLine)
|
||||
@ -227,7 +227,7 @@ class FlatfileMapping(MappingInterface):
|
||||
strId = self._canonicalId(id)
|
||||
try:
|
||||
if fdWasNone:
|
||||
fd = file(self.filename, 'r+')
|
||||
fd = open(self.filename, 'r+')
|
||||
fd.seek(0)
|
||||
fd.readline() # First line, nextId
|
||||
pos = fd.tell()
|
||||
@ -247,7 +247,7 @@ class FlatfileMapping(MappingInterface):
|
||||
fd.close()
|
||||
|
||||
def __iter__(self):
|
||||
fd = file(self.filename)
|
||||
fd = open(self.filename)
|
||||
fd.readline() # First line, nextId.
|
||||
for line in fd:
|
||||
(id, s) = self._splitLine(line)
|
||||
@ -256,7 +256,7 @@ class FlatfileMapping(MappingInterface):
|
||||
fd.close()
|
||||
|
||||
def vacuum(self):
|
||||
infd = file(self.filename)
|
||||
infd = open(self.filename)
|
||||
outfd = utils.file.AtomicFile(self.filename,makeBackupIfSmaller=False)
|
||||
outfd.write(infd.readline()) # First line, nextId.
|
||||
for line in infd:
|
||||
@ -280,7 +280,7 @@ class CdbMapping(MappingInterface):
|
||||
self.db['nextId'] = '1'
|
||||
|
||||
def _openCdb(self, *args, **kwargs):
|
||||
self.db = cdb.open(self.filename, 'c', **kwargs)
|
||||
self.db = cdb.open_db(self.filename, 'c', **kwargs)
|
||||
|
||||
def _getNextId(self):
|
||||
i = int(self.db['nextId'])
|
||||
|
@ -34,6 +34,7 @@ Contains simple socket drivers. Asyncore bugged (haha, pun!) me.
|
||||
|
||||
from __future__ import division
|
||||
|
||||
import sys
|
||||
import time
|
||||
import select
|
||||
import socket
|
||||
@ -52,7 +53,7 @@ import supybot.utils as utils
|
||||
import supybot.world as world
|
||||
import supybot.drivers as drivers
|
||||
import supybot.schedule as schedule
|
||||
from supybot.utils.iter import imap
|
||||
from itertools import imap
|
||||
|
||||
class SocketDriver(drivers.IrcDriver, drivers.ServersMixin):
|
||||
def __init__(self, irc):
|
||||
@ -62,7 +63,7 @@ class SocketDriver(drivers.IrcDriver, drivers.ServersMixin):
|
||||
self.conn = None
|
||||
self.servers = ()
|
||||
self.eagains = 0
|
||||
self.inbuffer = ''
|
||||
self.inbuffer = b''
|
||||
self.outbuffer = ''
|
||||
self.zombie = False
|
||||
self.connected = False
|
||||
@ -117,7 +118,10 @@ class SocketDriver(drivers.IrcDriver, drivers.ServersMixin):
|
||||
self.outbuffer += ''.join(imap(str, msgs))
|
||||
if self.outbuffer:
|
||||
try:
|
||||
sent = self.conn.send(self.outbuffer)
|
||||
if sys.version_info[0] < 3:
|
||||
sent = self.conn.send(self.outbuffer)
|
||||
else:
|
||||
sent = self.conn.send(self.outbuffer.encode())
|
||||
self.outbuffer = self.outbuffer[sent:]
|
||||
self.eagains = 0
|
||||
except socket.error, e:
|
||||
@ -140,9 +144,11 @@ class SocketDriver(drivers.IrcDriver, drivers.ServersMixin):
|
||||
try:
|
||||
self.inbuffer += self.conn.recv(1024)
|
||||
self.eagains = 0 # If we successfully recv'ed, we can reset this.
|
||||
lines = self.inbuffer.split('\n')
|
||||
lines = self.inbuffer.split(b'\n')
|
||||
self.inbuffer = lines.pop()
|
||||
for line in lines:
|
||||
if sys.version_info[0] >= 3:
|
||||
line = line.decode(errors='replace')
|
||||
msg = drivers.parseMsg(line)
|
||||
if msg is not None:
|
||||
self.irc.feedMsg(msg)
|
||||
|
13
src/i18n.py
@ -166,6 +166,7 @@ class _PluginInternationalization:
|
||||
self.translations = {}
|
||||
for line in translationFile:
|
||||
line = line[0:-1] # Remove the ending \n
|
||||
line = line
|
||||
|
||||
if line.startswith(MSGID):
|
||||
# Don't check if step is WAITING_FOR_MSGID
|
||||
@ -217,10 +218,10 @@ class _PluginInternationalization:
|
||||
|
||||
def _unescape(self, string, removeNewline=False):
|
||||
import supybot.utils as utils
|
||||
string = str.replace(string, '\\n\\n', '\n\n')
|
||||
string = str.replace(string, '\\n', ' ')
|
||||
string = str.replace(string, '\\"', '"')
|
||||
string = str.replace(string, "\'", "'")
|
||||
string = string.replace('\\n\\n', '\n\n')
|
||||
string = string.replace('\\n', ' ')
|
||||
string = string.replace('\\"', '"')
|
||||
string = string.replace("\'", "'")
|
||||
string = utils.str.normalizeWhitespace(string, removeNewline)
|
||||
return string
|
||||
|
||||
@ -317,7 +318,6 @@ class internationalizedFunction:
|
||||
def __init__(self, internationalizer, name, function):
|
||||
self._internationalizer = internationalizer
|
||||
self._name = name
|
||||
self.__call__ = function
|
||||
self._origin = function
|
||||
internationalizedFunctions.append(self)
|
||||
def loadLocale(self):
|
||||
@ -327,6 +327,9 @@ class internationalizedFunction:
|
||||
def restore(self):
|
||||
self.__call__ = self._origin
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
return self._origin(*args, **kwargs)
|
||||
|
||||
class internationalizedString(str):
|
||||
"""Simple subclass to str, that allow to add attributes. Also used to
|
||||
know if a string is already localized"""
|
||||
|
@ -42,7 +42,7 @@ import supybot.world as world
|
||||
import supybot.ircutils as ircutils
|
||||
import supybot.registry as registry
|
||||
import supybot.unpreserve as unpreserve
|
||||
from utils.iter import imap, ilen, ifilter
|
||||
from itertools import imap, ifilter
|
||||
|
||||
def isCapability(capability):
|
||||
return len(capability.split(None, 1)) == 1
|
||||
@ -109,8 +109,9 @@ def canonicalCapability(capability):
|
||||
assert isCapability(capability), 'got %s' % capability
|
||||
return capability.lower()
|
||||
|
||||
_unwildcard_remover = utils.str.MultipleRemover('!@*?')
|
||||
def unWildcardHostmask(hostmask):
|
||||
return hostmask.translate(utils.str.chars, '!@*?')
|
||||
return _unwildcard_remover(hostmask)
|
||||
|
||||
_invert = invertCapability
|
||||
class CapabilitySet(set):
|
||||
@ -844,7 +845,7 @@ class IgnoresDB(object):
|
||||
|
||||
def open(self, filename):
|
||||
self.filename = filename
|
||||
fd = file(self.filename)
|
||||
fd = open(self.filename)
|
||||
for line in utils.file.nonCommentNonEmptyLines(fd):
|
||||
try:
|
||||
line = line.rstrip('\r\n')
|
||||
|
@ -42,7 +42,7 @@ import supybot.ircmsgs as ircmsgs
|
||||
import supybot.ircutils as ircutils
|
||||
|
||||
from utils.str import rsplit
|
||||
from utils.iter import imap, chain, cycle
|
||||
from utils.iter import chain, cycle
|
||||
from utils.structures import queue, smallqueue, RingBuffer
|
||||
|
||||
###
|
||||
@ -1075,7 +1075,7 @@ class Irc(IrcCommandDispatcher):
|
||||
if isinstance(other, self.__class__):
|
||||
return id(self) == id(other)
|
||||
else:
|
||||
return other == self
|
||||
return other.__eq__(self)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not (self == other)
|
||||
|
@ -602,15 +602,8 @@ def join(channel, key=None, prefix='', msg=None):
|
||||
return IrcMsg(prefix=prefix, command='JOIN', args=(channel,), msg=msg)
|
||||
else:
|
||||
if conf.supybot.protocols.irc.strictRfc():
|
||||
assert key.translate(utils.str.chars,
|
||||
utils.str.chars[128:]) == key and \
|
||||
'\x00' not in key and \
|
||||
'\r' not in key and \
|
||||
'\n' not in key and \
|
||||
'\f' not in key and \
|
||||
'\t' not in key and \
|
||||
'\v' not in key and \
|
||||
' ' not in key
|
||||
chars = '\x00\r\n\f\t\v '
|
||||
assert not any([(ord(x) >= 128 or x in chars) for x in key])
|
||||
return IrcMsg(prefix=prefix, command='JOIN',
|
||||
args=(channel, key), msg=msg)
|
||||
|
||||
@ -628,17 +621,10 @@ def joins(channels, keys=None, prefix='', msg=None):
|
||||
command='JOIN',
|
||||
args=(','.join(channels),), msg=msg)
|
||||
else:
|
||||
for key in keys:
|
||||
if conf.supybot.protocols.irc.strictRfc():
|
||||
assert key.translate(utils.str.chars,
|
||||
utils.str.chars[128:])==key and \
|
||||
'\x00' not in key and \
|
||||
'\r' not in key and \
|
||||
'\n' not in key and \
|
||||
'\f' not in key and \
|
||||
'\t' not in key and \
|
||||
'\v' not in key and \
|
||||
' ' not in key
|
||||
if conf.supybot.protocols.irc.strictRfc():
|
||||
chars = '\x00\r\n\f\t\v '
|
||||
for key in keys:
|
||||
assert not any([(ord(x) >= 128 or x in chars) for x in key])
|
||||
return IrcMsg(prefix=prefix,
|
||||
command='JOIN',
|
||||
args=(','.join(channels), ','.join(keys)), msg=msg)
|
||||
|
@ -35,7 +35,10 @@ dicts, a nick class to handle nicks (so comparisons and hashing and whatnot
|
||||
work in an IRC-case-insensitive fashion), and numerous other things.
|
||||
"""
|
||||
|
||||
from __future__ import division
|
||||
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import random
|
||||
import string
|
||||
@ -43,6 +46,7 @@ import textwrap
|
||||
from cStringIO import StringIO as sio
|
||||
|
||||
import supybot.utils as utils
|
||||
from itertools import imap
|
||||
|
||||
def debug(s, *args):
|
||||
"""Prints a debug string. Most likely replaced by our logging debug."""
|
||||
@ -90,13 +94,14 @@ def joinHostmask(nick, ident, host):
|
||||
assert nick and ident and host
|
||||
return intern('%s!%s@%s' % (nick, ident, host))
|
||||
|
||||
_rfc1459trans = string.maketrans(string.ascii_uppercase + r'\[]~',
|
||||
string.ascii_lowercase + r'|{}^')
|
||||
_rfc1459trans = utils.str.MultipleReplacer(dict(zip(
|
||||
string.ascii_uppercase + r'\[]~',
|
||||
string.ascii_lowercase + r'|{}^')))
|
||||
def toLower(s, casemapping=None):
|
||||
"""s => s
|
||||
Returns the string s lowered according to IRC case rules."""
|
||||
if casemapping is None or casemapping == 'rfc1459':
|
||||
return s.translate(_rfc1459trans)
|
||||
return _rfc1459trans(s)
|
||||
elif casemapping == 'ascii': # freenode
|
||||
return s.lower()
|
||||
else:
|
||||
@ -456,9 +461,10 @@ def isValidArgument(s):
|
||||
|
||||
def safeArgument(s):
|
||||
"""If s is unsafe for IRC, returns a safe version."""
|
||||
if isinstance(s, unicode):
|
||||
if sys.version_info[0] < 3 and isinstance(s, unicode):
|
||||
s = s.encode('utf-8')
|
||||
elif not isinstance(s, basestring):
|
||||
elif (sys.version_info[0] < 3 and not isinstance(s, basestring)) or \
|
||||
(sys.version_info[0] >= 3 and not isinstance(s, str)):
|
||||
debug('Got a non-string in safeArgument: %r', s)
|
||||
s = str(s)
|
||||
if isValidArgument(s):
|
||||
@ -481,7 +487,7 @@ def dccIP(ip):
|
||||
x = 256**3
|
||||
for quad in ip.split('.'):
|
||||
i += int(quad)*x
|
||||
x /= 256
|
||||
x //= 256
|
||||
return i
|
||||
|
||||
def unDccIP(i):
|
||||
@ -490,9 +496,9 @@ def unDccIP(i):
|
||||
L = []
|
||||
while len(L) < 4:
|
||||
L.append(i % 256)
|
||||
i /= 256
|
||||
i //= 256
|
||||
L.reverse()
|
||||
return '.'.join(utils.iter.imap(str, L))
|
||||
return '.'.join(imap(str, L))
|
||||
|
||||
class IrcString(str):
|
||||
"""This class does case-insensitive comparison and hashing of nicks."""
|
||||
|
@ -65,6 +65,8 @@ class Formatter(logging.Formatter):
|
||||
|
||||
def format(self, record):
|
||||
self._fmt = self._fmtConf()
|
||||
if hasattr(self, '_style'): # Python 3
|
||||
self._style._fmt = self._fmtConf()
|
||||
return logging.Formatter.format(self, record)
|
||||
|
||||
|
||||
|
@ -115,7 +115,7 @@ def yn(prompt, default=None):
|
||||
else:
|
||||
default = 'n'
|
||||
s = expect(prompt, ['y', 'n'], default=default)
|
||||
if s is 'y':
|
||||
if s == 'y':
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
@ -30,7 +30,9 @@
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import codecs
|
||||
import string
|
||||
import textwrap
|
||||
|
||||
@ -57,17 +59,24 @@ class InvalidRegistryName(RegistryException):
|
||||
class InvalidRegistryValue(RegistryException):
|
||||
pass
|
||||
|
||||
class NonExistentRegistryEntry(RegistryException):
|
||||
class NonExistentRegistryEntry(RegistryException, AttributeError):
|
||||
# If we use hasattr() on a configuration group/value, Python 3 calls
|
||||
# __getattr__ and looks for an AttributeError, so __getattr__ has to
|
||||
# raise an AttributeError if a registry entry does not exist.
|
||||
pass
|
||||
|
||||
ENCODING = 'string_escape' if sys.version_info[0] < 3 else 'unicode_escape'
|
||||
decoder = codecs.getdecoder(ENCODING)
|
||||
encoder = codecs.getencoder(ENCODING)
|
||||
|
||||
_cache = utils.InsensitivePreservingDict()
|
||||
_lastModified = 0
|
||||
def open(filename, clear=False):
|
||||
def open_registry(filename, clear=False):
|
||||
"""Initializes the module by loading the registry file into memory."""
|
||||
global _lastModified
|
||||
if clear:
|
||||
_cache.clear()
|
||||
_fd = file(filename)
|
||||
_fd = open(filename)
|
||||
fd = utils.file.nonCommentNonEmptyLines(_fd)
|
||||
acc = ''
|
||||
slashEnd = re.compile(r'\\*$')
|
||||
@ -89,7 +98,8 @@ def open(filename, clear=False):
|
||||
try:
|
||||
(key, value) = re.split(r'(?<!\\):', acc, 1)
|
||||
key = key.strip()
|
||||
value = value.strip().decode('string_escape')
|
||||
value = value.strip()
|
||||
value = decoder(value)[0]
|
||||
acc = ''
|
||||
except ValueError:
|
||||
raise InvalidRegistryFile, 'Error unpacking line %r' % acc
|
||||
@ -145,7 +155,7 @@ def isValidRegistryName(name):
|
||||
return len(name.split()) == 1 and not name.startswith('_')
|
||||
|
||||
def escape(name):
|
||||
name = name.encode('string_escape')
|
||||
name = encoder(name)[0].decode()
|
||||
name = name.replace(':', '\\:')
|
||||
name = name.replace('.', '\\.')
|
||||
return name
|
||||
@ -153,7 +163,7 @@ def escape(name):
|
||||
def unescape(name):
|
||||
name = name.replace('\\.', '.')
|
||||
name = name.replace('\\:', ':')
|
||||
name = name.decode('string_escape')
|
||||
name = decoder(name.encode())[0]
|
||||
return name
|
||||
|
||||
_splitRe = re.compile(r'(?<!\\)\.')
|
||||
@ -364,7 +374,7 @@ class Value(Group):
|
||||
return repr(self())
|
||||
|
||||
def serialize(self):
|
||||
return str(self).encode('string_escape')
|
||||
return encoder(str(self))[0].decode()
|
||||
|
||||
# We tried many, *many* different syntactic methods here, and this one was
|
||||
# simply the best -- not very intrusive, easily overridden by subclasses,
|
||||
@ -463,7 +473,7 @@ class String(Value):
|
||||
|
||||
_printable = string.printable[:-4]
|
||||
def _needsQuoting(self, s):
|
||||
return s.translate(utils.str.chars, self._printable) and s.strip() != s
|
||||
return any([x not in self._printable for x in s]) and s.strip() != s
|
||||
|
||||
def __str__(self):
|
||||
s = self.value
|
||||
|
@ -19,9 +19,8 @@ class shlex:
|
||||
self.instream = sys.stdin
|
||||
self.infile = None
|
||||
self.commenters = '#'
|
||||
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
|
||||
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
|
||||
self.whitespace = ' \t\r\n'
|
||||
self.separators = self.whitespace
|
||||
self.quotes = '\'"'
|
||||
self.state = ' '
|
||||
self.pushback = []
|
||||
@ -121,7 +120,7 @@ class shlex:
|
||||
elif nextchar in self.commenters:
|
||||
self.instream.readline()
|
||||
self.lineno = self.lineno + 1
|
||||
elif nextchar in self.wordchars:
|
||||
elif nextchar not in self.separators:
|
||||
self.token = nextchar
|
||||
self.state = 'a'
|
||||
elif nextchar in self.quotes:
|
||||
@ -166,7 +165,7 @@ class shlex:
|
||||
elif nextchar in self.commenters:
|
||||
self.instream.readline()
|
||||
self.lineno = self.lineno + 1
|
||||
elif nextchar in self.wordchars or nextchar in self.quotes:
|
||||
elif nextchar not in self.separators or nextchar in self.quotes:
|
||||
self.token = self.token + nextchar
|
||||
else:
|
||||
self.pushback = [nextchar] + self.pushback
|
||||
|
25
src/test.py
@ -535,19 +535,19 @@ def open_http(url, data=None):
|
||||
auth = base64.b64encode(user_passwd).strip()
|
||||
else:
|
||||
auth = None
|
||||
h = HTTP(host)
|
||||
c = FakeHTTPConnection(host)
|
||||
if data is not None:
|
||||
h.putrequest('POST', selector)
|
||||
h.putheader('Content-Type', 'application/x-www-form-urlencoded')
|
||||
h.putheader('Content-Length', '%d' % len(data))
|
||||
c.putrequest('POST', selector)
|
||||
c.putheader('Content-Type', 'application/x-www-form-urlencoded')
|
||||
c.putheader('Content-Length', '%d' % len(data))
|
||||
else:
|
||||
h.putrequest('GET', selector)
|
||||
if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
|
||||
if auth: h.putheader('Authorization', 'Basic %s' % auth)
|
||||
if realhost: h.putheader('Host', realhost)
|
||||
for args in urllib.URLopener().addheaders: h.putheader(*args)
|
||||
h.endheaders()
|
||||
return h
|
||||
c.putrequest('GET', selector)
|
||||
if proxy_auth: c.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
|
||||
if auth: c.putheader('Authorization', 'Basic %s' % auth)
|
||||
if realhost: c.putheader('Host', realhost)
|
||||
for args in urllib.URLopener().addheaders: c.putheader(*args)
|
||||
c.endheaders()
|
||||
return c
|
||||
|
||||
class FakeHTTPConnection(httplib.HTTPConnection):
|
||||
_data = ''
|
||||
@ -566,9 +566,6 @@ class FakeHTTPConnection(httplib.HTTPConnection):
|
||||
#def getresponse(self, *args, **kwargs):
|
||||
# pass
|
||||
|
||||
class HTTP(httplib.HTTP):
|
||||
_connection_class = FakeHTTPConnection
|
||||
|
||||
class HTTPPluginTestCase(PluginTestCase):
|
||||
def setUp(self):
|
||||
PluginTestCase.setUp(self, forceSetup=True)
|
||||
|
@ -40,7 +40,7 @@ class Reader(object):
|
||||
return s.lower()
|
||||
|
||||
def readFile(self, filename):
|
||||
self.read(file(filename))
|
||||
self.read(open(filename))
|
||||
|
||||
def read(self, fd):
|
||||
lineno = 0
|
||||
|
@ -34,14 +34,14 @@ import random
|
||||
import shutil
|
||||
import os.path
|
||||
|
||||
from iter import ifilter
|
||||
from itertools import ifilter
|
||||
|
||||
import crypt
|
||||
|
||||
def contents(filename):
|
||||
return file(filename).read()
|
||||
return open(filename).read()
|
||||
|
||||
def open(filename, mode='wb', *args, **kwargs):
|
||||
def open_mkdir(filename, mode='wb', *args, **kwargs):
|
||||
"""filename -> file object.
|
||||
|
||||
Returns a file object for filename, creating as many directories as may be
|
||||
@ -53,15 +53,15 @@ def open(filename, mode='wb', *args, **kwargs):
|
||||
raise ValueError, 'utils.file.open expects to write.'
|
||||
(dirname, basename) = os.path.split(filename)
|
||||
os.makedirs(dirname)
|
||||
return file(filename, mode, *args, **kwargs)
|
||||
return open(filename, mode, *args, **kwargs)
|
||||
|
||||
def copy(src, dst):
|
||||
"""src, dst -> None
|
||||
|
||||
Copies src to dst, using this module's 'open' function to open dst.
|
||||
"""
|
||||
srcfd = file(src)
|
||||
dstfd = open(dst, 'wb')
|
||||
srcfd = open(src)
|
||||
dstfd = open_mkdir(dst, 'wb')
|
||||
shutil.copyfileobj(srcfd, dstfd)
|
||||
|
||||
def writeLine(fd, line):
|
||||
@ -70,20 +70,20 @@ def writeLine(fd, line):
|
||||
fd.write('\n')
|
||||
|
||||
def readLines(filename):
|
||||
fd = file(filename)
|
||||
fd = open(filename)
|
||||
try:
|
||||
return [line.rstrip('\r\n') for line in fd.readlines()]
|
||||
finally:
|
||||
fd.close()
|
||||
|
||||
def touch(filename):
|
||||
fd = file(filename, 'w')
|
||||
fd = open(filename, 'w')
|
||||
fd.close()
|
||||
|
||||
def mktemp(suffix=''):
|
||||
"""Gives a decent random string, suitable for a filename."""
|
||||
r = random.Random()
|
||||
m = crypt.md5(suffix)
|
||||
m = crypt.md5(suffix.encode('utf8'))
|
||||
r.seed(time.time())
|
||||
s = str(r.getstate())
|
||||
period = random.random()
|
||||
@ -95,7 +95,7 @@ def mktemp(suffix=''):
|
||||
m.update(s)
|
||||
m.update(str(now))
|
||||
s = m.hexdigest()
|
||||
return crypt.sha(s + str(time.time())).hexdigest() + suffix
|
||||
return crypt.sha((s + str(time.time())).encode('utf8')).hexdigest()+suffix
|
||||
|
||||
def nonCommentLines(fd):
|
||||
for line in fd:
|
||||
@ -115,7 +115,7 @@ def chunks(fd, size):
|
||||
## yield chunk
|
||||
## chunk = fd.read(size)
|
||||
|
||||
class AtomicFile(file):
|
||||
class AtomicFile(object):
|
||||
"""Used for files that need to be atomically written -- i.e., if there's a
|
||||
failure, the original file remains, unmodified. mode must be 'w' or 'wb'"""
|
||||
class default(object): # Holder for values.
|
||||
@ -152,18 +152,40 @@ class AtomicFile(file):
|
||||
self.tempFilename = os.path.join(tmpDir, tempFilename)
|
||||
# This doesn't work because of the uncollectable garbage effect.
|
||||
# self.__parent = super(AtomicFile, self)
|
||||
super(AtomicFile, self).__init__(self.tempFilename, mode)
|
||||
self._fd = open(self.tempFilename, mode)
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return self._fd.closed
|
||||
|
||||
def close(self):
|
||||
return self._fd.close()
|
||||
|
||||
def write(self, data):
|
||||
return self._fd.write(data)
|
||||
|
||||
def writelines(self, lines):
|
||||
return self._fd.writelines(lines)
|
||||
|
||||
def rollback(self):
|
||||
if not self.closed:
|
||||
super(AtomicFile, self).close()
|
||||
self._fd.close()
|
||||
if os.path.exists(self.tempFilename):
|
||||
os.remove(self.tempFilename)
|
||||
self.rolledback = True
|
||||
|
||||
def seek(self, offset):
|
||||
return self._fd.seek(offset)
|
||||
|
||||
def tell(self):
|
||||
return self._fd.tell()
|
||||
|
||||
def flush(self):
|
||||
return self._fd.flush()
|
||||
|
||||
def close(self):
|
||||
if not self.rolledback:
|
||||
super(AtomicFile, self).close()
|
||||
self._fd.close()
|
||||
# We don't mind writing an empty file if the file we're overwriting
|
||||
# doesn't exist.
|
||||
newSize = os.path.getsize(self.tempFilename)
|
||||
@ -190,7 +212,7 @@ class AtomicFile(file):
|
||||
# rename a file (and shutil.move will use os.rename if
|
||||
# possible), we first check if we have the write permission
|
||||
# and only then do we write.
|
||||
fd = file(self.filename, 'a')
|
||||
fd = open(self.filename, 'a')
|
||||
fd.close()
|
||||
shutil.move(self.tempFilename, self.filename)
|
||||
|
||||
|
@ -30,17 +30,16 @@
|
||||
|
||||
import os
|
||||
import sys
|
||||
import new
|
||||
import ast
|
||||
import time
|
||||
import types
|
||||
import compiler
|
||||
import textwrap
|
||||
import UserDict
|
||||
import traceback
|
||||
import collections
|
||||
from itertools import imap
|
||||
|
||||
from str import format
|
||||
from file import mktemp
|
||||
from iter import imap, all
|
||||
|
||||
import crypt
|
||||
|
||||
@ -148,34 +147,38 @@ def saltHash(password, salt=None, hash='sha'):
|
||||
hasher = crypt.sha
|
||||
elif hash == 'md5':
|
||||
hasher = crypt.md5
|
||||
return '|'.join([salt, hasher(salt + password).hexdigest()])
|
||||
return '|'.join([salt, hasher((salt + password).encode('utf8')).hexdigest()])
|
||||
|
||||
_astStr2 = ast.Str if sys.version_info[0] < 3 else ast.Bytes
|
||||
def safeEval(s, namespace={'True': True, 'False': False, 'None': None}):
|
||||
"""Evaluates s, safely. Useful for turning strings into tuples/lists/etc.
|
||||
without unsafely using eval()."""
|
||||
try:
|
||||
node = compiler.parse(s)
|
||||
node = ast.parse(s)
|
||||
except SyntaxError, e:
|
||||
raise ValueError, 'Invalid string: %s.' % e
|
||||
nodes = compiler.parse(s).node.nodes
|
||||
nodes = ast.parse(s).body
|
||||
if not nodes:
|
||||
if node.__class__ is compiler.ast.Module:
|
||||
if node.__class__ is ast.Module:
|
||||
return node.doc
|
||||
else:
|
||||
raise ValueError, format('Unsafe string: %q', s)
|
||||
node = nodes[0]
|
||||
if node.__class__ is not compiler.ast.Discard:
|
||||
raise ValueError, format('Invalid expression: %q', s)
|
||||
node = node.getChildNodes()[0]
|
||||
def checkNode(node):
|
||||
if node.__class__ is compiler.ast.Const:
|
||||
if node.__class__ is ast.Expr:
|
||||
node = node.value
|
||||
if node.__class__ in (ast.Num,
|
||||
ast.Str,
|
||||
_astStr2):
|
||||
return True
|
||||
if node.__class__ in (compiler.ast.List,
|
||||
compiler.ast.Tuple,
|
||||
compiler.ast.Dict):
|
||||
return all(checkNode, node.getChildNodes())
|
||||
if node.__class__ is compiler.ast.Name:
|
||||
if node.name in namespace:
|
||||
elif node.__class__ in (ast.List,
|
||||
ast.Tuple):
|
||||
return all([checkNode(x) for x in node.elts])
|
||||
elif node.__class__ is ast.Dict:
|
||||
return all([checkNode(x) for x in node.values]) and \
|
||||
all([checkNode(x) for x in node.values])
|
||||
elif node.__class__ is ast.Name:
|
||||
if node.id in namespace:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
@ -203,20 +206,25 @@ class IterableMap(object):
|
||||
def iterkeys(self):
|
||||
for (key, _) in self.iteritems():
|
||||
yield key
|
||||
__iter__ = iterkeys
|
||||
|
||||
def itervalues(self):
|
||||
for (_, value) in self.iteritems():
|
||||
yield value
|
||||
|
||||
def items(self):
|
||||
return list(self.iteritems())
|
||||
if sys.version_info[0] < 3:
|
||||
# Our 2to3 fixers automatically rename iteritems/iterkeys/itervalues
|
||||
# to items/keys/values
|
||||
def items(self):
|
||||
return list(self.iteritems())
|
||||
|
||||
def keys(self):
|
||||
return list(self.iterkeys())
|
||||
def keys(self):
|
||||
return list(self.iterkeys())
|
||||
|
||||
def values(self):
|
||||
return list(self.itervalues())
|
||||
def values(self):
|
||||
return list(self.itervalues())
|
||||
__iter__ = iterkeys
|
||||
else:
|
||||
__iter__ = items
|
||||
|
||||
def __len__(self):
|
||||
ret = 0
|
||||
@ -230,7 +238,7 @@ class IterableMap(object):
|
||||
return False
|
||||
|
||||
|
||||
class InsensitivePreservingDict(UserDict.DictMixin, object):
|
||||
class InsensitivePreservingDict(collections.MutableMapping):
|
||||
def key(self, s):
|
||||
"""Override this if you wish."""
|
||||
if s is not None:
|
||||
@ -264,6 +272,12 @@ class InsensitivePreservingDict(UserDict.DictMixin, object):
|
||||
def __delitem__(self, k):
|
||||
del self.data[self.key(k)]
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.data)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.data)
|
||||
|
||||
def iteritems(self):
|
||||
return self.data.itervalues()
|
||||
|
||||
|
@ -30,7 +30,6 @@
|
||||
from __future__ import division
|
||||
|
||||
import sys
|
||||
import new
|
||||
import random
|
||||
|
||||
from itertools import *
|
||||
@ -99,13 +98,13 @@ def choice(iterable):
|
||||
return random.choice(iterable)
|
||||
else:
|
||||
n = 1
|
||||
m = new.module('') # Guaranteed unique value.
|
||||
ret = m
|
||||
found = False
|
||||
for x in iterable:
|
||||
if random.random() < 1/n:
|
||||
ret = x
|
||||
found = True
|
||||
n += 1
|
||||
if ret is m:
|
||||
if not found:
|
||||
raise IndexError
|
||||
return ret
|
||||
|
||||
@ -148,8 +147,8 @@ def ilen(iterable):
|
||||
i += 1
|
||||
return i
|
||||
|
||||
def startswith(long, short):
|
||||
longI = iter(long)
|
||||
def startswith(long_, short):
|
||||
longI = iter(long_)
|
||||
shortI = iter(short)
|
||||
try:
|
||||
while True:
|
||||
|
@ -31,7 +31,6 @@
|
||||
import sys
|
||||
import types
|
||||
import fnmatch
|
||||
import UserDict
|
||||
import threading
|
||||
|
||||
def universalImport(*names):
|
||||
|
@ -34,7 +34,6 @@ Simple utility functions related to strings.
|
||||
"""
|
||||
|
||||
import re
|
||||
import new
|
||||
import sys
|
||||
import string
|
||||
import textwrap
|
||||
@ -45,9 +44,6 @@ from structures import TwoWayDictionary
|
||||
from supybot.i18n import PluginInternationalization
|
||||
internationalizeFunction=PluginInternationalization().internationalizeFunction
|
||||
|
||||
curry = new.instancemethod
|
||||
chars = string.maketrans('', '')
|
||||
|
||||
def rsplit(s, sep=None, maxsplit=-1):
|
||||
"""Equivalent to str.split, except splitting from the right."""
|
||||
if sys.version_info < (2, 4, 0):
|
||||
@ -96,17 +92,42 @@ def distance(s, t):
|
||||
d[i][j] = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+cost)
|
||||
return d[n][m]
|
||||
|
||||
_soundextrans = string.maketrans(string.ascii_uppercase,
|
||||
'01230120022455012623010202')
|
||||
_notUpper = chars.translate(chars, string.ascii_uppercase)
|
||||
class MultipleReplacer:
|
||||
"""Return a callable that replaces all dict keys by the associated
|
||||
value. More efficient than multiple .replace()."""
|
||||
|
||||
# We use an object instead of a lambda function because it avoids the
|
||||
# need for using the staticmethod() on the lambda function if assigning
|
||||
# it to a class in Python 3.
|
||||
def __init__(self, dict_):
|
||||
self._dict = dict_
|
||||
dict_ = {re.escape(key): val for key,val in dict_.items()}
|
||||
self._matcher = re.compile('|'.join(dict_.keys()))
|
||||
def __call__(self, s):
|
||||
return self._matcher.sub(lambda m: self._dict[m.group(0)], s)
|
||||
def multipleReplacer(dict_):
|
||||
return MultipleReplacer(dict_)
|
||||
|
||||
class MultipleRemover:
|
||||
"""Return a callable that removes all words in the list. A bit more
|
||||
efficient than multipleReplacer"""
|
||||
# See comment of MultipleReplacer
|
||||
def __init__(self, list_):
|
||||
list_ = [re.escape(x) for x in list_]
|
||||
self._matcher = re.compile('|'.join(list_))
|
||||
def __call__(self, s):
|
||||
return self._matcher.sub(lambda m: '', s)
|
||||
|
||||
_soundextrans = MultipleReplacer(dict(zip(string.ascii_uppercase,
|
||||
'01230120022455012623010202')))
|
||||
def soundex(s, length=4):
|
||||
"""Returns the soundex hash of a given string."""
|
||||
s = s.upper() # Make everything uppercase.
|
||||
s = s.translate(chars, _notUpper) # Delete non-letters.
|
||||
s = ''.join([x for x in s if x in string.ascii_uppercase])
|
||||
if not s:
|
||||
raise ValueError, 'Invalid string for soundex: %s'
|
||||
firstChar = s[0] # Save the first character.
|
||||
s = s.translate(_soundextrans) # Convert to soundex numbers.
|
||||
s = _soundextrans(s) # Convert to soundex numbers.
|
||||
s = s.lstrip(s[0]) # Remove all repeated first characters.
|
||||
L = [firstChar]
|
||||
for c in s:
|
||||
@ -120,7 +141,8 @@ def dqrepr(s):
|
||||
"""Returns a repr() of s guaranteed to be in double quotes."""
|
||||
# The wankers-that-be decided not to use double-quotes anymore in 2.3.
|
||||
# return '"' + repr("'\x00" + s)[6:]
|
||||
return '"%s"' % s.encode('string_escape').replace('"', '\\"')
|
||||
encoding = 'string_escape' if sys.version_info[0] < 3 else 'unicode_escape'
|
||||
return '"%s"' % s.encode(encoding).decode().replace('"', '\\"')
|
||||
|
||||
def quoted(s):
|
||||
"""Returns a quoted s."""
|
||||
@ -194,9 +216,11 @@ def perlReToReplacer(s):
|
||||
if 'g' in flags:
|
||||
g = True
|
||||
flags = filter('g'.__ne__, flags)
|
||||
if isinstance(flags, list):
|
||||
flags = ''.join(flags)
|
||||
r = perlReToPythonRe(sep.join(('', regexp, flags)))
|
||||
if g:
|
||||
return curry(r.sub, replace)
|
||||
return lambda s: r.sub(replace, s)
|
||||
else:
|
||||
return lambda s: r.sub(replace, s, 1)
|
||||
|
||||
|
@ -33,7 +33,7 @@ Data structures for Python.
|
||||
|
||||
import time
|
||||
import types
|
||||
import UserDict
|
||||
import collections
|
||||
from itertools import imap
|
||||
|
||||
class RingBuffer(object):
|
||||
@ -121,11 +121,11 @@ class RingBuffer(object):
|
||||
if self.full:
|
||||
oidx = idx
|
||||
if type(oidx) == types.SliceType:
|
||||
range = xrange(*slice.indices(oidx, len(self)))
|
||||
if len(range) != len(elt):
|
||||
range_ = xrange(*slice.indices(oidx, len(self)))
|
||||
if len(range_) != len(elt):
|
||||
raise ValueError, 'seq must be the same length as slice.'
|
||||
else:
|
||||
for (i, x) in zip(range, elt):
|
||||
for (i, x) in zip(range_, elt):
|
||||
self[i] = x
|
||||
else:
|
||||
(m, idx) = divmod(oidx, len(self.L))
|
||||
@ -135,11 +135,11 @@ class RingBuffer(object):
|
||||
self.L[idx] = elt
|
||||
else:
|
||||
if type(idx) == types.SliceType:
|
||||
range = xrange(*slice.indices(idx, len(self)))
|
||||
if len(range) != len(elt):
|
||||
range_ = xrange(*slice.indices(idx, len(self)))
|
||||
if len(range_) != len(elt):
|
||||
raise ValueError, 'seq must be the same length as slice.'
|
||||
else:
|
||||
for (i, x) in zip(range, elt):
|
||||
for (i, x) in zip(range_, elt):
|
||||
self[i] = x
|
||||
else:
|
||||
self.L[idx] = elt
|
||||
@ -240,15 +240,15 @@ class queue(object):
|
||||
if len(self) == 0:
|
||||
raise IndexError, 'queue index out of range'
|
||||
if type(oidx) == types.SliceType:
|
||||
range = xrange(*slice.indices(oidx, len(self)))
|
||||
if len(range) != len(value):
|
||||
range_ = xrange(*slice.indices(oidx, len(self)))
|
||||
if len(range_) != len(value):
|
||||
raise ValueError, 'seq must be the same length as slice.'
|
||||
else:
|
||||
for i in range:
|
||||
for i in range_:
|
||||
(m, idx) = divmod(oidx, len(self))
|
||||
if m and m != -1:
|
||||
raise IndexError, oidx
|
||||
for (i, x) in zip(range, value):
|
||||
for (i, x) in zip(range_, value):
|
||||
self[i] = x
|
||||
else:
|
||||
(m, idx) = divmod(oidx, len(self))
|
||||
@ -261,8 +261,8 @@ class queue(object):
|
||||
|
||||
def __delitem__(self, oidx):
|
||||
if type(oidx) == types.SliceType:
|
||||
range = xrange(*slice.indices(oidx, len(self)))
|
||||
for i in range:
|
||||
range_ = xrange(*slice.indices(oidx, len(self)))
|
||||
for i in range_:
|
||||
del self[i]
|
||||
else:
|
||||
(m, idx) = divmod(oidx, len(self))
|
||||
@ -418,7 +418,7 @@ class MultiSet(object):
|
||||
return elt in self.d
|
||||
|
||||
|
||||
class CacheDict(UserDict.DictMixin):
|
||||
class CacheDict(collections.MutableMapping):
|
||||
def __init__(self, max, **kwargs):
|
||||
self.d = dict(**kwargs)
|
||||
self.max = max
|
||||
@ -443,5 +443,8 @@ class CacheDict(UserDict.DictMixin):
|
||||
def __iter__(self):
|
||||
return iter(self.d)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.d)
|
||||
|
||||
|
||||
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
||||
|
@ -84,7 +84,7 @@ class TransactionMixin(python.Object):
|
||||
raise InvalidCwd(expected)
|
||||
|
||||
def _journalCommands(self):
|
||||
journal = file(self._journalName)
|
||||
journal = open(self._journalName)
|
||||
for line in journal:
|
||||
line = line.rstrip('\n')
|
||||
(command, rest) = line.split(None, 1)
|
||||
@ -112,8 +112,8 @@ class Transaction(TransactionMixin):
|
||||
raise FailedAcquisition(self.txnDir, e)
|
||||
os.mkdir(self.dirize(self.ORIGINALS))
|
||||
os.mkdir(self.dirize(self.REPLACEMENTS))
|
||||
self._journal = file(self._journalName, 'a')
|
||||
cwd = file(self.dirize('cwd'), 'w')
|
||||
self._journal = open(self._journalName, 'a')
|
||||
cwd = open(self.dirize('cwd'), 'w')
|
||||
cwd.write(os.getcwd())
|
||||
cwd.close()
|
||||
|
||||
@ -179,7 +179,7 @@ class Transaction(TransactionMixin):
|
||||
self._journalCommand('append', filename, length)
|
||||
replacement = self._replacement(filename)
|
||||
File.copy(filename, replacement)
|
||||
return file(replacement, 'a')
|
||||
return open(replacement, 'a')
|
||||
|
||||
def commit(self, removeWhenComplete=True):
|
||||
self._journal.close()
|
||||
@ -218,7 +218,7 @@ class Rollback(TransactionMixin):
|
||||
shutil.copy(self._original(filename), filename)
|
||||
|
||||
def rollbackAppend(self, filename, length):
|
||||
fd = file(filename, 'a')
|
||||
fd = open(filename, 'a')
|
||||
fd.truncate(int(length))
|
||||
fd.close()
|
||||
|
||||
|
@ -29,13 +29,14 @@
|
||||
###
|
||||
|
||||
import re
|
||||
import sys
|
||||
import socket
|
||||
import urllib
|
||||
import urllib2
|
||||
import httplib
|
||||
import sgmllib
|
||||
import urlparse
|
||||
import htmlentitydefs
|
||||
from HTMLParser import HTMLParser
|
||||
|
||||
sockerrors = (socket.error,)
|
||||
try:
|
||||
@ -48,7 +49,9 @@ from str import normalizeWhitespace
|
||||
Request = urllib2.Request
|
||||
urlquote = urllib.quote
|
||||
urlunquote = urllib.unquote
|
||||
urlencode = urllib.urlencode
|
||||
|
||||
def urlencode(*args, **kwargs):
|
||||
return urllib.urlencode(*args, **kwargs).encode()
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
@ -150,19 +153,19 @@ def getUrl(url, size=None, headers=None, data=None):
|
||||
def getDomain(url):
|
||||
return urlparse.urlparse(url)[1]
|
||||
|
||||
class HtmlToText(sgmllib.SGMLParser):
|
||||
class HtmlToText(HTMLParser, object):
|
||||
"""Taken from some eff-bot code on c.l.p."""
|
||||
entitydefs = htmlentitydefs.entitydefs.copy()
|
||||
entitydefs['nbsp'] = ' '
|
||||
def __init__(self, tagReplace=' '):
|
||||
self.data = []
|
||||
self.tagReplace = tagReplace
|
||||
sgmllib.SGMLParser.__init__(self)
|
||||
super(HtmlToText, self).__init__()
|
||||
|
||||
def unknown_starttag(self, tag, attr):
|
||||
def handle_starttag(self, tag, attr):
|
||||
self.data.append(self.tagReplace)
|
||||
|
||||
def unknown_endtag(self, tag):
|
||||
def handle_endtag(self, tag):
|
||||
self.data.append(self.tagReplace)
|
||||
|
||||
def handle_data(self, data):
|
||||
@ -175,6 +178,8 @@ class HtmlToText(sgmllib.SGMLParser):
|
||||
def htmlToText(s, tagReplace=' '):
|
||||
"""Turns HTML into text. tagReplace is a string to replace HTML tags with.
|
||||
"""
|
||||
if sys.version_info[0] >= 3 and isinstance(s, bytes):
|
||||
s = s.decode()
|
||||
x = HtmlToText(tagReplace)
|
||||
x.feed(s)
|
||||
return x.getText()
|
||||
|
11
src/world.py
@ -113,7 +113,13 @@ def debugFlush(s=''):
|
||||
|
||||
def upkeep():
|
||||
"""Does upkeep (like flushing, garbage collection, etc.)"""
|
||||
sys.exc_clear() # Just in case, let's clear the exception info.
|
||||
# Just in case, let's clear the exception info.
|
||||
try:
|
||||
sys.exc_clear()
|
||||
except AttributeError:
|
||||
# Python 3 does not have sys.exc_clear. The except statement clears
|
||||
# the info itself (and we've just entered an except statement)
|
||||
pass
|
||||
if os.name == 'nt':
|
||||
try:
|
||||
import msvcrt
|
||||
@ -153,7 +159,8 @@ def upkeep():
|
||||
#if registryFilename is not None:
|
||||
# registry.open(registryFilename)
|
||||
if not dying:
|
||||
log.debug('Regexp cache size: %s', len(sre._cache))
|
||||
if sys.version_info[0] < 3:
|
||||
log.debug('Regexp cache size: %s', len(sre._cache))
|
||||
log.debug('Pattern cache size: %s', len(ircutils._patternCache))
|
||||
log.debug('HostmaskPatternEqual cache size: %s',
|
||||
len(ircutils._hostmaskPatternEqualCache))
|
||||
|
@ -27,6 +27,7 @@
|
||||
# POSSIBILITY OF SUCH DAMAGE.
|
||||
###
|
||||
|
||||
import sys
|
||||
import os.path
|
||||
import unittest
|
||||
|
||||
@ -36,6 +37,7 @@ load = unittest.defaultTestLoader.loadTestsFromModule
|
||||
|
||||
GLOBALS = globals()
|
||||
dirname = os.path.dirname(__file__)
|
||||
sys.path.append(dirname)
|
||||
filenames = os.listdir(dirname)
|
||||
# Uncomment these if you need some consistency in the order these tests run.
|
||||
# filenames.sort()
|
||||
@ -43,8 +45,8 @@ filenames = os.listdir(dirname)
|
||||
for filename in filenames:
|
||||
if filename.startswith('test_') and filename.endswith('.py'):
|
||||
name = filename[:-3]
|
||||
exec 'import %s' % name in GLOBALS
|
||||
test.suites.append(load(GLOBALS[name]))
|
||||
plugin = __import__(name)
|
||||
test.suites.append(load(plugin))
|
||||
|
||||
module = None
|
||||
|
||||
|
@ -171,7 +171,7 @@ class ValuesTestCase(SupyTestCase):
|
||||
conf.supybot.reply.whenAddressedBy.chars.set('\\')
|
||||
filename = conf.supybot.directories.conf.dirize('backslashes.conf')
|
||||
registry.close(conf.supybot, filename)
|
||||
registry.open(filename)
|
||||
registry.open_registry(filename)
|
||||
self.assertEqual(conf.supybot.reply.whenAddressedBy.chars(), '\\')
|
||||
|
||||
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
||||
|
@ -145,9 +145,7 @@ class GenTest(SupyTestCase):
|
||||
L = ['a', 'c', 'b']
|
||||
self.assertEqual(sorted(L), ['a', 'b', 'c'])
|
||||
self.assertEqual(L, ['a', 'c', 'b'])
|
||||
def mycmp(x, y):
|
||||
return -cmp(x, y)
|
||||
self.assertEqual(sorted(L, mycmp), ['c', 'b', 'a'])
|
||||
self.assertEqual(sorted(L, reverse=True), ['c', 'b', 'a'])
|
||||
|
||||
def testTimeElapsed(self):
|
||||
self.assertRaises(ValueError, utils.timeElapsed, 0,
|
||||
@ -309,6 +307,14 @@ class StrTest(SupyTestCase):
|
||||
f = PRTR('s/^/foo/')
|
||||
self.assertEqual(f('bar'), 'foobar')
|
||||
|
||||
def testMultipleReplacer(self):
|
||||
replacer = utils.str.MultipleReplacer({'foo': 'bar', 'a': 'b'})
|
||||
self.assertEqual(replacer('hi foo hi'), 'hi bar hi')
|
||||
|
||||
def testMultipleRemover(self):
|
||||
remover = utils.str.MultipleRemover(['foo', 'bar'])
|
||||
self.assertEqual(remover('testfoobarbaz'), 'testbaz')
|
||||
|
||||
def testPReToReplacerDifferentSeparator(self):
|
||||
f = utils.str.perlReToReplacer('s#foo#bar#')
|
||||
self.assertEqual(f('foobarbaz'), 'barbarbaz')
|
||||
|
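
The StrTest cases above exercise the MultipleReplacer and MultipleRemover helpers this branch adds to src/utils/str.py as Python 3-safe replacements for the old string.maketrans/str.translate idiom. Below is a minimal, self-contained sketch of the same regex-based technique: the class bodies are condensed from the diff above, while the demo values in the __main__ block are illustrative only and not part of the commit.

import re

class MultipleReplacer:
    """Callable that replaces every key of the given dict with its value,
    using one compiled alternation instead of chained str.replace() calls."""
    def __init__(self, dict_):
        self._dict = dict_
        # Build a single pattern matching any of the keys.
        self._matcher = re.compile('|'.join(re.escape(k) for k in dict_))
    def __call__(self, s):
        return self._matcher.sub(lambda m: self._dict[m.group(0)], s)

class MultipleRemover:
    """Callable that deletes every listed character/substring from a string."""
    def __init__(self, list_):
        self._matcher = re.compile('|'.join(re.escape(x) for x in list_))
    def __call__(self, s):
        return self._matcher.sub('', s)

if __name__ == '__main__':
    # Roughly what unWildcardHostmask() does after this change (demo hostmask is made up).
    remover = MultipleRemover('!@*?')
    print(remover('nick!*user@host?'))   # -> 'nickuserhost'
    # Roughly what the rfc1459 toLower() replacer does, shown on a tiny mapping.
    lower = MultipleReplacer(dict(zip('ABC', 'abc')))
    print(lower('CAB'))                  # -> 'cab'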