2015-12-07 02:40:13 +01:00
|
|
|
"""
|
|
|
|
classes.py - Base classes for PyLink IRC Services.
|
|
|
|
|
|
|
|
This module contains the base classes used by PyLink, including threaded IRC
|
|
|
|
connections and objects used to represent IRC servers, users, and channels.
|
|
|
|
|
|
|
|
Here be dragons.
|
|
|
|
"""
|
|
|
|
|
2015-07-18 07:52:55 +02:00
|
|
|
import threading
|
2015-08-26 05:37:15 +02:00
|
|
|
import time
|
|
|
|
import socket
|
|
|
|
import ssl
|
|
|
|
import hashlib
|
2016-05-01 01:54:11 +02:00
|
|
|
import inspect
|
2017-02-25 07:27:11 +01:00
|
|
|
import ipaddress
|
2017-04-01 02:41:56 +02:00
|
|
|
import queue
|
2017-07-07 23:33:00 +02:00
|
|
|
import functools
|
2017-08-07 04:21:55 +02:00
|
|
|
import string
|
2017-08-07 05:02:09 +02:00
|
|
|
import re
|
2018-03-24 08:10:00 +01:00
|
|
|
import collections
|
|
|
|
import collections.abc
|
2018-05-11 23:38:21 +02:00
|
|
|
import textwrap
|
2015-07-08 03:07:20 +02:00
|
|
|
|
2016-07-20 02:44:22 +02:00
|
|
|
try:
|
|
|
|
import ircmatch
|
|
|
|
except ImportError:
|
2016-12-10 02:15:53 +01:00
|
|
|
raise ImportError("PyLink requires ircmatch to function; please install it and try again.")
|
2016-07-07 08:11:36 +02:00
|
|
|
|
2018-03-17 19:01:32 +01:00
|
|
|
from . import world, utils, structures, conf, __version__, selectdriver
|
2016-06-21 03:18:54 +02:00
|
|
|
from .log import *
|
2017-08-22 07:20:20 +02:00
|
|
|
from .utils import ProtocolError # Compatibility with PyLink 1.x
|
2015-08-26 05:37:15 +02:00
|
|
|
|
|
|
|
### Internal classes (users, servers, channels)
|
|
|
|
|
2017-08-22 06:31:50 +02:00
|
|
|
class ChannelState(structures.IRCCaseInsensitiveDict):
    """Case insensitive dictionary of channels; missing entries are created on first access."""

    def __getitem__(self, key):
        """Return the Channel object for *key*, lazily instantiating it if unseen."""
        mangled = self._keymangle(key)

        try:
            return self._data[mangled]
        except KeyError:
            log.debug('(%s) ChannelState: creating new channel %s in memory', self._irc.name, mangled)
            channel = Channel(self._irc, mangled)
            self._data[mangled] = channel
            return channel
|
|
|
|
|
2018-03-24 08:10:00 +01:00
|
|
|
|
|
|
|
class User():
    """PyLink IRC user class, tracking one user's state on a network."""

    def __init__(self, irc, nick, ts, uid, server, ident='null', host='null',
                 realname='PyLink dummy client', realhost='null',
                 ip='0.0.0.0', manipulatable=False, opertype='IRC Operator'):
        self._irc = irc
        self._nick = nick
        # Case-normalized nick, kept in sync with _nick by the nick setter.
        self.lower_nick = irc.to_lower(nick)

        # Core identity fields.
        self.uid = uid
        self.ts = ts
        self.server = server
        self.ident = ident
        self.host = host
        self.realhost = realhost
        self.ip = ip
        self.realname = realname

        # Tracks user modes
        self.modes = set()

        # Tracks PyLink identification status
        self.account = ''

        # Tracks oper type (for display only)
        self.opertype = opertype

        # Tracks external services identification status
        self.services_account = ''

        # Tracks channels the user is in
        self.channels = structures.IRCCaseInsensitiveSet(self._irc)

        # Tracks away message status
        self.away = ''

        # This sets whether the client should be marked as manipulatable.
        # Plugins like bots.py's commands should take caution against
        # manipulating these "protected" clients, to prevent desyncs and such.
        # For "serious" service clients, this should always be False.
        self.manipulatable = manipulatable

        # Cloaked host for IRCds that use it
        self.cloaked_host = None

        # Stores service bot name if applicable
        self.service = None

    @property
    def nick(self):
        """The user's current nick."""
        return self._nick

    @nick.setter
    def nick(self, newnick):
        """Update the nick, keeping the network's nick -> UID index in sync."""
        bynick = self._irc.users.bynick
        previous = self.lower_nick

        self._nick = newnick
        self.lower_nick = self._irc.to_lower(newnick)

        # Drop the old nick -> UID mapping, removing the key entirely once empty.
        if previous in bynick:
            bynick[previous].remove(self.uid)
            if not bynick[previous]:
                del bynick[previous]

        # Register this UID under the new (case-normalized) nick.
        bynick.setdefault(self.lower_nick, []).append(self.uid)

    def __repr__(self):
        return 'User(%s/%s)' % (self.uid, self.nick)

IrcUser = User
|
|
|
|
|
|
|
|
# Bidirectional dict based off https://stackoverflow.com/a/21894086
|
|
|
|
class UserMapping(collections.abc.MutableMapping, structures.CopyWrapper):
    """
    A mapping storing User objects by UID, as well as UIDs by nick via
    the 'bynick' attribute.
    """
    def __init__(self, *, data=None):
        """
        Initializes the mapping, optionally from an existing UID -> User dict.
        """
        if data is not None:
            assert isinstance(data, dict)
            self._data = data
        else:
            self._data = {}

        # nick -> [UID, ...] reverse index. Rebuild it from any pre-existing
        # data so that copies (see __copy__) keep a consistent index instead of
        # silently starting with an empty one.
        self.bynick = collections.defaultdict(list)
        for uid, userobj in self._data.items():
            self.bynick[userobj.lower_nick].append(uid)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, userobj):
        assert hasattr(userobj, 'lower_nick'), "Cannot add object without lower_nick attribute to UserMapping"
        if key in self._data:
            # Fixed: this previously formatted 'self.name', an attribute that
            # does not exist on UserMapping, raising AttributeError instead of
            # warning about the (likely desync-related) replacement.
            log.warning('Attempting to replace User object for %r: %r -> %r',
                        key, self._data.get(key), userobj)

        self._data[key] = userobj
        self.bynick.setdefault(userobj.lower_nick, []).append(key)

    def __delitem__(self, key):
        # Remove this entry from the bynick index
        if self[key].lower_nick in self.bynick:
            self.bynick[self[key].lower_nick].remove(key)

            # Remove now-empty keys as well.
            if not self.bynick[self[key].lower_nick]:
                del self.bynick[self[key].lower_nick]

        del self._data[key]

    # Generic container methods. XXX: consider abstracting this out in structures?
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self._data)

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def __contains__(self, key):
        return self._data.__contains__(key)

    def __copy__(self):
        return self.__class__(data=self._data.copy())
|
|
|
|
|
2018-03-17 23:49:48 +01:00
|
|
|
class PyLinkNetworkCore(structures.CamelCaseToSnakeCase):
|
2015-12-07 02:40:13 +01:00
|
|
|
"""Base IRC object for PyLink."""
|
|
|
|
|
2017-06-25 08:27:24 +02:00
|
|
|
    def __init__(self, netname):
        """
        Initializes the network object for the network named *netname*,
        reading its settings from conf.conf['servers'][netname].
        """
        # Channel log handlers attached for this network (see log_setup()).
        self.loghandlers = []
        self.name = netname
        self.conf = conf.conf
        self.sid = None
        self.serverdata = conf.conf['servers'][netname]
        self.botdata = conf.conf['pylink']
        self.protoname = self.__class__.__module__.split('.')[-1]  # Remove leading pylinkirc.protocols.
        self.proto = self.irc = self  # Backwards compat

        # Protocol stuff
        self.casemapping = 'rfc1459'
        self.hook_map = {}

        # Lists required conf keys for the server block.
        self.conf_keys = {'ip', 'port', 'hostname', 'sid', 'sidrange', 'protocol', 'sendpass',
                          'recvpass'}

        # Defines a set of PyLink protocol capabilities
        self.protocol_caps = set()

        # These options depend on self.serverdata from above to be set.
        self.encoding = None

        # Connection lifecycle events; _init_vars() is re-run on every reconnect.
        self.connected = threading.Event()
        self._aborted = threading.Event()
        self._aborted_send = threading.Event()
        self._reply_lock = threading.RLock()

        # Sets the multiplier for autoconnect delay (grows with time).
        self.autoconnect_active_multiplier = 1

        # Whether the last connection fully established before dropping.
        self.was_successful = False

        self._init_vars()
|
2016-03-25 22:54:29 +01:00
|
|
|
|
2017-06-03 08:17:14 +02:00
|
|
|
def log_setup(self):
|
2016-01-23 22:13:38 +01:00
|
|
|
"""
|
|
|
|
Initializes any channel loggers defined for the current network.
|
|
|
|
"""
|
|
|
|
try:
|
2017-03-05 09:00:11 +01:00
|
|
|
channels = conf.conf['logging']['channels'][self.name]
|
2017-09-09 04:04:49 +02:00
|
|
|
except (KeyError, TypeError): # Not set up; just ignore.
|
2016-01-23 22:13:38 +01:00
|
|
|
return
|
|
|
|
|
|
|
|
log.debug('(%s) Setting up channel logging to channels %r', self.name,
|
|
|
|
channels)
|
2016-01-31 08:33:03 +01:00
|
|
|
|
2017-09-09 04:04:49 +02:00
|
|
|
# Only create handlers if they haven't already been set up.
|
2016-01-31 08:33:03 +01:00
|
|
|
if not self.loghandlers:
|
2017-09-09 04:04:49 +02:00
|
|
|
if not isinstance(channels, dict):
|
|
|
|
log.warning('(%s) Got invalid channel logging configuration %r; are your indentation '
|
|
|
|
'and block commenting consistent?', self.name, channels)
|
|
|
|
return
|
2016-01-31 08:33:03 +01:00
|
|
|
|
|
|
|
for channel, chandata in channels.items():
|
|
|
|
# Fetch the log level for this channel block.
|
|
|
|
level = None
|
2017-09-09 04:04:49 +02:00
|
|
|
if isinstance(chandata, dict):
|
2016-01-31 08:33:03 +01:00
|
|
|
level = chandata.get('loglevel')
|
2017-09-09 04:04:49 +02:00
|
|
|
else:
|
|
|
|
log.warning('(%s) Got invalid channel logging pair %r: %r; are your indentation '
|
|
|
|
'and block commenting consistent?', self.name, filename, config)
|
2016-01-31 08:33:03 +01:00
|
|
|
|
|
|
|
handler = PyLinkChannelLogger(self, channel, level=level)
|
|
|
|
self.loghandlers.append(handler)
|
|
|
|
log.addHandler(handler)
|
2016-01-23 22:13:38 +01:00
|
|
|
|
2017-07-14 14:22:05 +02:00
|
|
|
    def _init_vars(self):
        """
        (Re)sets an IRC object to its default state. This should be called when
        an IRC object is first created, and on every reconnection to a network.
        """
        self.encoding = self.serverdata.get('encoding') or 'utf-8'

        # Tracks the main PyLink client's UID.
        self.pseudoclient = None

        # Internal variable to set the place and caller of the last command (in PM
        # or in a channel), used by fantasy command support.
        self.called_by = None
        self.called_in = None

        # Initialize the server, channel, and user indexes to be populated by
        # our protocol module.
        self.servers = {}
        self.users = UserMapping()

        # Two versions of the channels index exist in PyLink 2.0, and they are joined together
        # - irc._channels which implicitly creates channels on access (mostly used
        #   in protocol modules)
        # - irc.channels which does not (recommended for use by plugins)
        self._channels = ChannelState(self)
        self.channels = structures.IRCCaseInsensitiveDict(self, data=self._channels._data)

        # This sets the list of supported channel and user modes: the default
        # RFC1459 modes are implied. Named modes are used here to make
        # protocol-independent code easier to write, as mode chars vary by
        # IRCd.
        # Protocol modules should add to and/or replace this with what their
        # protocol supports. This can be a hardcoded list or something
        # negotiated on connect, depending on the nature of their protocol.
        self.cmodes = {'op': 'o', 'secret': 's', 'private': 'p',
                       'noextmsg': 'n', 'moderated': 'm', 'inviteonly': 'i',
                       'topiclock': 't', 'limit': 'l', 'ban': 'b',
                       'voice': 'v', 'key': 'k',
                       # This fills in the type of mode each mode character is.
                       # A-type modes are list modes (i.e. bans, ban exceptions, etc.),
                       # B-type modes require an argument to both set and unset,
                       #   but there can only be one value at a time
                       #   (i.e. cmode +k).
                       # C-type modes require an argument to set but not to unset
                       #   (one sets "+l limit" and # "-l"),
                       # and D-type modes take no arguments at all.
                       '*A': 'b',
                       '*B': 'k',
                       '*C': 'l',
                       '*D': 'imnpst'}
        self.umodes = {'invisible': 'i', 'snomask': 's', 'wallops': 'w',
                       'oper': 'o',
                       '*A': '', '*B': '', '*C': '', '*D': 'iosw'}

        # Acting extbans such as +b m:n!u@h on InspIRCd
        self.extbans_acting = {}
        # Matching extbans such as R:account on InspIRCd and $a:account on TS6.
        self.extbans_matching = {}

        # This max nick length starts off as the config value, but may be
        # overwritten later by the protocol module if such information is
        # received. It defaults to 30.
        self.maxnicklen = self.serverdata.get('maxnicklen', 30)

        # Defines a list of supported prefix modes.
        self.prefixmodes = {'o': '@', 'v': '+'}

        # Defines the uplink SID (to be filled in by protocol module).
        self.uplink = None
        self.start_ts = int(time.time())

        # Set up channel logging for the network
        self.log_setup()
|
2016-01-23 22:13:38 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def __repr__(self):
|
2017-07-05 08:32:30 +02:00
|
|
|
return "<%s object for network %r>" % (self.__class__.__name__, self.name)
|
2016-07-29 06:34:00 +02:00
|
|
|
|
2017-08-07 06:49:41 +02:00
|
|
|
    ## Stubs
    def validate_server_conf(self):
        """Stub for protocol modules: validates this network's server block.

        The default implementation accepts any configuration."""
        return

    def connect(self):
        """Stub for protocol modules: connects to the network. Must be overridden."""
        raise NotImplementedError

    def disconnect(self):
        """Stub for protocol modules: disconnects from the network. Must be overridden."""
        raise NotImplementedError
|
|
|
|
|
2017-08-07 06:49:41 +02:00
|
|
|
## General utility functions
|
2017-06-16 06:31:03 +02:00
|
|
|
def call_hooks(self, hook_args):
|
|
|
|
"""Calls a hook function with the given hook args."""
|
|
|
|
numeric, command, parsed_args = hook_args
|
|
|
|
# Always make sure TS is sent.
|
|
|
|
if 'ts' not in parsed_args:
|
|
|
|
parsed_args['ts'] = int(time.time())
|
|
|
|
hook_cmd = command
|
2017-06-25 10:45:16 +02:00
|
|
|
hook_map = self.hook_map
|
2016-07-29 06:34:00 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
# If the hook name is present in the protocol module's hook_map, then we
|
|
|
|
# should set the hook name to the name that points to instead.
|
|
|
|
# For example, plugins will read SETHOST as CHGHOST, EOS (end of sync)
|
|
|
|
# as ENDBURST, etc.
|
|
|
|
if command in hook_map:
|
|
|
|
hook_cmd = hook_map[command]
|
2015-12-07 02:40:13 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
# However, individual handlers can also return a 'parse_as' key to send
|
|
|
|
# their payload to a different hook. An example of this is "/join 0"
|
|
|
|
# being interpreted as leaving all channels (PART).
|
|
|
|
hook_cmd = parsed_args.get('parse_as') or hook_cmd
|
2015-12-07 02:40:13 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
log.debug('(%s) Raw hook data: [%r, %r, %r] received from %s handler '
|
|
|
|
'(calling hook %s)', self.name, numeric, hook_cmd, parsed_args,
|
|
|
|
command, hook_cmd)
|
2017-01-02 21:30:24 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
# Iterate over registered hook functions, catching errors accordingly.
|
2017-09-23 22:39:43 +02:00
|
|
|
for hook_pair in world.hooks[hook_cmd].copy():
|
2017-09-03 06:15:59 +02:00
|
|
|
hook_func = hook_pair[1]
|
2017-06-16 06:31:03 +02:00
|
|
|
try:
|
|
|
|
log.debug('(%s) Calling hook function %s from plugin "%s"', self.name,
|
|
|
|
hook_func, hook_func.__module__)
|
2018-02-19 07:42:39 +01:00
|
|
|
retcode = hook_func(self, numeric, command, parsed_args)
|
|
|
|
|
|
|
|
if retcode is False:
|
|
|
|
log.debug('(%s) Stopping hook loop for %r (command=%r)', self.name,
|
|
|
|
hook_func, command)
|
|
|
|
break
|
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
except Exception:
|
|
|
|
# We don't want plugins to crash our servers...
|
|
|
|
log.exception('(%s) Unhandled exception caught in hook %r from plugin "%s"',
|
|
|
|
self.name, hook_func, hook_func.__module__)
|
|
|
|
log.error('(%s) The offending hook data was: %s', self.name,
|
|
|
|
hook_args)
|
|
|
|
continue
|
2015-12-07 02:40:13 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def call_command(self, source, text):
|
|
|
|
"""
|
|
|
|
Calls a PyLink bot command. source is the caller's UID, and text is the
|
|
|
|
full, unparsed text of the message.
|
|
|
|
"""
|
|
|
|
world.services['pylink'].call_cmd(self, source, text)
|
2016-06-11 20:29:11 +02:00
|
|
|
|
2018-05-11 23:38:21 +02:00
|
|
|
def msg(self, target, text, notice=None, source=None, loopback=True, wrap=True):
|
2017-06-16 06:31:03 +02:00
|
|
|
"""Handy function to send messages/notices to clients. Source
|
|
|
|
is optional, and defaults to the main PyLink client if not specified."""
|
|
|
|
if not text:
|
|
|
|
return
|
2016-07-29 07:49:05 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
if not (source or self.pseudoclient):
|
|
|
|
# No explicit source set and our main client wasn't available; abort.
|
|
|
|
return
|
|
|
|
source = source or self.pseudoclient.uid
|
2016-07-29 07:49:05 +02:00
|
|
|
|
2018-05-11 23:38:21 +02:00
|
|
|
def _msg(text):
|
|
|
|
if notice:
|
|
|
|
self.notice(source, target, text)
|
|
|
|
cmd = 'PYLINK_SELF_NOTICE'
|
|
|
|
else:
|
|
|
|
self.message(source, target, text)
|
|
|
|
cmd = 'PYLINK_SELF_PRIVMSG'
|
2016-07-29 07:49:05 +02:00
|
|
|
|
2018-05-11 23:38:21 +02:00
|
|
|
# Determines whether we should send a hook for this msg(), to forward things like services
|
2017-06-16 06:31:03 +02:00
|
|
|
# replies across relay.
|
2018-05-11 23:38:21 +02:00
|
|
|
if loopback:
|
|
|
|
self.call_hooks([source, cmd, {'target': target, 'text': text}])
|
|
|
|
|
|
|
|
# Optionally wrap the text output.
|
|
|
|
if wrap:
|
|
|
|
for line in self.wrap_message(source, target, text):
|
|
|
|
_msg(line)
|
|
|
|
else:
|
|
|
|
_msg(text)
|
2015-12-07 02:40:13 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def _reply(self, text, notice=None, source=None, private=None, force_privmsg_in_private=False,
|
2018-05-11 23:38:21 +02:00
|
|
|
loopback=True, wrap=True):
|
2017-06-16 06:31:03 +02:00
|
|
|
"""
|
|
|
|
Core of the reply() function - replies to the last caller in the right context
|
|
|
|
(channel or PM).
|
|
|
|
"""
|
|
|
|
if private is None:
|
|
|
|
# Allow using private replies as the default, if no explicit setting was given.
|
2017-07-14 14:50:07 +02:00
|
|
|
private = conf.conf['pylink'].get("prefer_private_replies")
|
2015-08-26 05:37:15 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
# Private reply is enabled, or the caller was originally a PM
|
|
|
|
if private or (self.called_in in self.users):
|
|
|
|
if not force_privmsg_in_private:
|
|
|
|
# For private replies, the default is to override the notice=True/False argument,
|
|
|
|
# and send replies as notices regardless. This is standard behaviour for most
|
|
|
|
# IRC services, but can be disabled if force_privmsg_in_private is given.
|
|
|
|
notice = True
|
|
|
|
target = self.called_by
|
|
|
|
else:
|
|
|
|
target = self.called_in
|
2016-01-31 08:04:13 +01:00
|
|
|
|
2018-05-11 23:38:21 +02:00
|
|
|
self.msg(target, text, notice=notice, source=source, loopback=loopback, wrap=wrap)
|
2016-01-31 08:04:13 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def reply(self, *args, **kwargs):
|
|
|
|
"""
|
|
|
|
Replies to the last caller in the right context (channel or PM).
|
2016-01-31 08:04:13 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
This function wraps around _reply() and can be monkey-patched in a thread-safe manner
|
|
|
|
to temporarily redirect plugin output to another target.
|
|
|
|
"""
|
2017-07-14 14:22:05 +02:00
|
|
|
with self._reply_lock:
|
2017-06-16 06:31:03 +02:00
|
|
|
self._reply(*args, **kwargs)
|
2015-08-26 05:37:15 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
    def error(self, text, **kwargs):
        """Replies with an error to the last caller in the right context (channel or PM)."""
        # This is a stub to alias error to reply, prefixing the text with "Error: ".
        self.reply("Error: %s" % text, **kwargs)
|
2016-08-22 01:46:57 +02:00
|
|
|
|
2017-08-07 06:49:41 +02:00
|
|
|
## Configuration-based lookup functions.
|
2017-06-16 06:31:03 +02:00
|
|
|
def version(self):
|
|
|
|
"""
|
|
|
|
Returns a detailed version string including the PyLink daemon version,
|
|
|
|
the protocol module in use, and the server hostname.
|
|
|
|
"""
|
|
|
|
fullversion = 'PyLink-%s. %s :[protocol:%s, encoding:%s]' % (__version__, self.hostname(), self.protoname, self.encoding)
|
|
|
|
return fullversion
|
2016-08-22 01:46:57 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def hostname(self):
|
|
|
|
"""
|
|
|
|
Returns the server hostname used by PyLink on the given server.
|
|
|
|
"""
|
|
|
|
return self.serverdata.get('hostname', world.fallback_hostname)
|
2016-07-29 06:49:16 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def get_full_network_name(self):
|
|
|
|
"""
|
|
|
|
Returns the full network name (as defined by the "netname" option), or the
|
|
|
|
short network name if that isn't defined.
|
|
|
|
"""
|
|
|
|
return self.serverdata.get('netname', self.name)
|
2016-07-29 07:22:47 +02:00
|
|
|
|
2018-04-08 07:20:35 +02:00
|
|
|
def get_service_option(self, servicename, option, default=None, global_option=None):
|
|
|
|
"""
|
|
|
|
Returns the value of the requested service bot option on the current network, or the
|
|
|
|
global value if it is not set for this network. This function queries and returns:
|
|
|
|
|
|
|
|
1) If present, the value of the config option servers::<NETNAME>::<SERVICENAME>_<OPTION>
|
|
|
|
2) If present, the value of the config option <SERVICENAME>::<GLOBAL_OPTION>, where
|
|
|
|
<GLOBAL_OPTION> is either the 'global_option' keyword argument or <OPTION>.
|
|
|
|
3) The default value given in the 'keyword' argument.
|
|
|
|
|
|
|
|
While service bot and config option names can technically be uppercase or mixed case,
|
|
|
|
the convention is to define them in all lowercase characters.
|
|
|
|
"""
|
|
|
|
netopt = self.serverdata.get('%s_%s' % (servicename, option))
|
|
|
|
if netopt is not None:
|
|
|
|
return netopt
|
|
|
|
|
|
|
|
if global_option is not None:
|
|
|
|
option = global_option
|
|
|
|
globalopt = conf.conf.get(servicename, {}).get(option)
|
|
|
|
if globalopt is not None:
|
|
|
|
return globalopt
|
|
|
|
|
|
|
|
return default
|
2017-08-07 06:49:41 +02:00
|
|
|
|
|
|
|
def has_cap(self, capab):
|
|
|
|
"""
|
|
|
|
Returns whether this protocol module instance has the requested capability.
|
|
|
|
"""
|
|
|
|
return capab.lower() in self.protocol_caps
|
|
|
|
|
|
|
|
## Shared helper functions
|
2017-06-17 01:49:45 +02:00
|
|
|
    def _pre_connect(self):
        """
        Implements triggers called before a network connects.
        """
        # Reset the abort flags and per-connection state left over from any
        # previous session before (re)connecting.
        self._aborted_send.clear()
        self._aborted.clear()
        self._init_vars()

        try:
            self.validate_server_conf()
        except Exception as e:
            # Surface configuration problems before attempting the connection.
            log.error("(%s) Configuration error: %s", self.name, e)
            raise
|
|
|
|
|
|
|
|
    def _run_autoconnect(self):
        """Blocks for the autoconnect time and returns True if autoconnect is enabled."""
        if world.shutting_down.is_set():
            log.debug('(%s) _run_autoconnect: aborting autoconnect attempt since we are shutting down.', self.name)
            return

        autoconnect = self.serverdata.get('autoconnect')

        # Sets the autoconnect growth multiplier (e.g. a value of 2 multiplies the autoconnect
        # time by 2 on every failure, etc.)
        autoconnect_multiplier = self.serverdata.get('autoconnect_multiplier', 2)
        autoconnect_max = self.serverdata.get('autoconnect_max', 1800)
        # These values must at least be 1.
        autoconnect_multiplier = max(autoconnect_multiplier, 1)
        autoconnect_max = max(autoconnect_max, 1)

        log.debug('(%s) _run_autoconnect: Autoconnect delay set to %s seconds.', self.name, autoconnect)
        if autoconnect is not None and autoconnect >= 1:
            log.debug('(%s) _run_autoconnect: Multiplying autoconnect delay %s by %s.', self.name, autoconnect, self.autoconnect_active_multiplier)
            autoconnect *= self.autoconnect_active_multiplier
            # Add a cap on the max. autoconnect delay, so that we don't go on forever...
            autoconnect = min(autoconnect, autoconnect_max)

            log.info('(%s) _run_autoconnect: Going to auto-reconnect in %s seconds.', self.name, autoconnect)
            # Continue when either self._aborted is set or the autoconnect time passes.
            # Compared to time.sleep(), this allows us to stop connections quicker if we
            # are told to abort while waiting for autoconnect.
            self._aborted.clear()
            self._aborted.wait(autoconnect)

            # Store in the local state what the autoconnect multiplier currently is.
            self.autoconnect_active_multiplier *= autoconnect_multiplier

            if self not in world.networkobjects.values():
                log.debug('(%s) _run_autoconnect: Stopping stale connect loop', self.name)
                return
            return True

        else:
            log.debug('(%s) _run_autoconnect: Stopping connect loop (autoconnect value %r is < 1).', self.name, autoconnect)
            return
|
|
|
|
|
|
|
|
    def _pre_disconnect(self):
        """
        Implements triggers called before a network disconnects.
        """
        self._aborted.set()
        # Record whether the connection fully established before dropping; this
        # state is later reported via the PYLINK_DISCONNECT hook.
        self.was_successful = self.connected.is_set()
        log.debug('(%s) _pre_disconnect: got %s for was_successful state', self.name, self.was_successful)

        log.debug('(%s) _pre_disconnect: Clearing self.connected state.', self.name)
        self.connected.clear()

        log.debug('(%s) _pre_disconnect: Removing channel logging handlers due to disconnect.', self.name)
        while self.loghandlers:
            log.removeHandler(self.loghandlers.pop())
|
|
|
|
|
|
|
|
    def _post_disconnect(self):
        """
        Implements triggers called after a network disconnects.
        """
        # Internal hook signifying that a network has disconnected.
        self.call_hooks([None, 'PYLINK_DISCONNECT', {'was_successful': self.was_successful}])

        # Clear the to_lower cache.
        self.to_lower.cache_clear()
|
|
|
|
|
2017-06-28 01:21:30 +02:00
|
|
|
    def _remove_client(self, numeric):
        """Internal function to remove a client from our internal state."""
        # Part the user from every channel; iterate over a copy since we may
        # delete channels while looping.
        for c, v in self.channels.copy().items():
            v.remove_user(numeric)
            # Clear empty non-permanent channels.
            if not (self.channels[c].users or ((self.cmodes.get('permanent'), None) in self.channels[c].modes)):
                del self.channels[c]

        sid = self.get_server(numeric)
        log.debug('(%s) Removing client %s from users index', self.name, numeric)
        try:
            del self.users[numeric]
            self.servers[sid].users.discard(numeric)
        except KeyError:
            log.warning('(%s) Failed to remove %r from users index - possible desync or timing issue? (stray QUIT after KILL)',
                        self.name, numeric, exc_info=True)
|
2017-06-25 08:27:24 +02:00
|
|
|
|
2017-08-07 06:49:41 +02:00
|
|
|
## State checking functions
|
2017-06-28 01:17:28 +02:00
|
|
|
def nick_to_uid(self, nick):
|
|
|
|
"""Looks up the UID of a user with the given nick, if one is present."""
|
|
|
|
nick = self.to_lower(nick)
|
2018-03-24 08:10:00 +01:00
|
|
|
|
|
|
|
uids = self.users.bynick.get(nick, [])
|
|
|
|
if len(uids) > 1:
|
2018-05-13 01:01:16 +02:00
|
|
|
log.warning('(%s) Multiple UIDs found for nick %r: %r; using the last one!', self.name, nick, uids)
|
2018-03-24 08:10:00 +01:00
|
|
|
try:
|
2018-05-13 01:01:16 +02:00
|
|
|
return uids[-1]
|
2018-03-24 08:10:00 +01:00
|
|
|
except IndexError:
|
|
|
|
return None
|
2017-06-28 01:17:28 +02:00
|
|
|
|
|
|
|
def is_internal_client(self, numeric):
|
|
|
|
"""
|
|
|
|
Returns whether the given client numeric (UID) is a PyLink client.
|
|
|
|
"""
|
|
|
|
sid = self.get_server(numeric)
|
|
|
|
if sid and self.servers[sid].internal:
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
def is_internal_server(self, sid):
|
|
|
|
"""Returns whether the given SID is an internal PyLink server."""
|
|
|
|
return (sid in self.servers and self.servers[sid].internal)
|
|
|
|
|
|
|
|
def get_server(self, numeric):
|
|
|
|
"""Finds the SID of the server a user is on."""
|
2017-08-11 22:13:50 +02:00
|
|
|
if numeric in self.servers: # We got a server already (lazy hack)
|
|
|
|
return numeric
|
|
|
|
|
2017-06-28 01:17:28 +02:00
|
|
|
userobj = self.users.get(numeric)
|
|
|
|
if userobj:
|
|
|
|
return userobj.server
|
|
|
|
|
|
|
|
def is_manipulatable_client(self, uid):
|
|
|
|
"""
|
|
|
|
Returns whether the given user is marked as an internal, manipulatable
|
|
|
|
client. Usually, automatically spawned services clients should have this
|
|
|
|
set True to prevent interactions with opers (like mode changes) from
|
|
|
|
causing desyncs.
|
|
|
|
"""
|
|
|
|
return self.is_internal_client(uid) and self.users[uid].manipulatable
|
|
|
|
|
|
|
|
def get_service_bot(self, uid):
|
|
|
|
"""
|
|
|
|
Checks whether the given UID is a registered service bot. If True,
|
|
|
|
returns the cooresponding ServiceBot object.
|
|
|
|
"""
|
|
|
|
userobj = self.users.get(uid)
|
|
|
|
if not userobj:
|
|
|
|
return False
|
|
|
|
|
2017-08-31 04:29:26 +02:00
|
|
|
# Look for the "service" attribute in the User object,sname = userobj.service
|
|
|
|
# Warn if the service name we fetched isn't a registered service.
|
|
|
|
sname = userobj.service
|
2017-08-31 04:48:46 +02:00
|
|
|
if sname is not None and sname not in world.services.keys():
|
2017-08-31 04:29:26 +02:00
|
|
|
log.warning("(%s) User %s / %s had a service bot record to a service that doesn't "
|
|
|
|
"exist (%s)!", self.name, uid, userobj.nick, sname)
|
|
|
|
return world.services.get(sname)
|
2017-06-25 08:27:24 +02:00
|
|
|
|
2017-08-26 02:05:53 +02:00
|
|
|
# Network objects hold thread synchronization primitives (Events, RLocks) and
# are self-referential (self.proto = self.irc = self), so exclude them from
# the generic copy machinery in structures.
structures._BLACKLISTED_COPY_TYPES.append(PyLinkNetworkCore)
|
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
class PyLinkNetworkCoreWithUtils(PyLinkNetworkCore):
|
2017-07-07 23:33:00 +02:00
|
|
|
|
2017-06-28 01:12:45 +02:00
|
|
|
def __init__(self, *args, **kwargs):
    """Initializes the utils mixin; all arguments are forwarded to PyLinkNetworkCore."""
    super().__init__(*args, **kwargs)
    # Lock for updateTS to make sure only one thread can change the channel TS at one time.
    self._ts_lock = threading.Lock()
|
|
|
|
|
2018-03-23 06:03:08 +01:00
|
|
|
@functools.lru_cache(maxsize=8192)
def to_lower(self, text):
    """Returns the given text lowercased per the network's casemapping (cached)."""
    if not text:
        return text

    if self.casemapping == 'rfc1459':
        # RFC 1459 defines these bracket-ish pairs as case-equivalent.
        text = text.translate(str.maketrans('{}|~', '[]\\^'))

    # Lowercase via bytes so that only ASCII characters are changed.
    # Unicode in channel names, etc. *is* case sensitive!
    return text.encode().lower().decode()
|
2016-07-29 06:49:16 +02:00
|
|
|
|
2017-08-29 05:01:28 +02:00
|
|
|
_NICK_REGEX = r'^[A-Za-z\|\\_\[\]\{\}\^\`][A-Z0-9a-z\-\|\\_\[\]\{\}\^\`]*$'
|
|
|
|
@classmethod
|
|
|
|
def is_nick(cls, s, nicklen=None):
|
|
|
|
"""Returns whether the string given is a valid IRC nick."""
|
|
|
|
|
|
|
|
if nicklen and len(s) > nicklen:
|
|
|
|
return False
|
|
|
|
return bool(re.match(cls._NICK_REGEX, s))
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def is_channel(s):
|
|
|
|
"""Returns whether the string given is a valid IRC channel name."""
|
|
|
|
return str(s).startswith('#')
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def _isASCII(s):
|
|
|
|
"""Returns whether the given string only contains non-whitespace ASCII characters."""
|
|
|
|
chars = string.ascii_letters + string.digits + string.punctuation
|
|
|
|
return all(char in chars for char in s)
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def is_server_name(cls, s):
|
|
|
|
"""Returns whether the string given is a valid IRC server name."""
|
|
|
|
return cls._isASCII(s) and '.' in s and not s.startswith('.')
|
|
|
|
|
|
|
|
_HOSTMASK_RE = re.compile(r'^\S+!\S+@\S+$')
|
|
|
|
@classmethod
|
|
|
|
def is_hostmask(cls, text):
|
|
|
|
"""Returns whether the given text is a valid IRC hostmask (nick!user@host)."""
|
|
|
|
# Band-aid patch here to prevent bad bans set by Janus forwarding people into invalid channels.
|
|
|
|
return bool(cls._HOSTMASK_RE.match(text) and '#' not in text)
|
|
|
|
|
2018-03-03 05:56:59 +01:00
|
|
|
def _parse_modes(self, args, existing, supported_modes, is_channel=False, prefixmodes=None):
    """
    parse_modes() core.

    args: A mode string or a mode string split by space (type list)
    existing: A set or iterable of existing modes
    supported_modes: a dict of PyLink supported modes (mode names mapping
    to mode chars, with *ABCD keys)
    prefixmodes: a dict of prefix modes (irc.prefixmodes style)

    Returns a list of (prefixed mode char, argument-or-None) tuples.
    """
    prefix = ''
    if isinstance(args, str):
        # If the modestring was given as a string, split it into a list.
        args = args.split()

    assert args, 'No valid modes were supplied!'
    # First word is the mode characters; the rest are positional arguments.
    modestring = args[0]
    args = args[1:]

    # Work on a set copy: we tentatively apply each parsed mode below without
    # mutating the caller's mode list.
    existing = set(existing)

    res = []
    for mode in modestring:
        if mode in '+-':
            # A direction sign applies to every mode char that follows it.
            prefix = mode
        else:
            if not prefix:
                # No sign seen yet; assume we're setting modes.
                prefix = '+'
            arg = None
            log.debug('Current mode: %s%s; args left: %s', prefix, mode, args)
            try:
                # NOTE: the per-target `prefixmodes` mapping only gates this branch;
                # membership of the mode char itself is tested against self.prefixmodes.
                if prefixmodes and mode in self.prefixmodes:
                    # We're setting a prefix mode on someone (e.g. +o user1)
                    log.debug('Mode %s: This mode is a prefix mode.', mode)
                    arg = args.pop(0)
                    # Convert nicks to UIDs implicitly
                    arg = self.nick_to_uid(arg) or arg
                    if arg not in self.users:  # Target doesn't exist, skip it.
                        log.debug('(%s) Skipping setting mode "%s %s"; the '
                                  'target doesn\'t seem to exist!', self.name,
                                  mode, arg)
                        continue
                elif mode in (supported_modes['*A'] + supported_modes['*B']):
                    # Must have parameter.
                    log.debug('Mode %s: This mode must have parameter.', mode)
                    arg = args.pop(0)
                    if prefix == '-':
                        if mode in supported_modes['*B'] and arg == '*':
                            # Charybdis allows unsetting +k without actually
                            # knowing the key by faking the argument when unsetting
                            # as a single "*".
                            # We'd need to know the real argument of +k for us to
                            # be able to unset the mode.
                            oldarg = dict(existing).get(mode)
                            if oldarg:
                                # Set the arg to the old one on the channel.
                                arg = oldarg
                                log.debug("Mode %s: coersing argument of '*' to %r.", mode, arg)

                        log.debug('(%s) parse_modes: checking if +%s %s is in old modes list: %s', self.name, mode, arg, existing)

                        if (mode, arg) not in existing:
                            # Ignore attempts to unset bans that don't exist.
                            log.debug("(%s) parse_modes(): ignoring removal of non-existent list mode +%s %s", self.name, mode, arg)
                            continue

                elif prefix == '+' and mode in supported_modes['*C']:
                    # Only has parameter when setting.
                    log.debug('Mode %s: Only has parameter when setting.', mode)
                    arg = args.pop(0)
            except IndexError:
                # args.pop(0) above ran dry: the mode wants an argument that
                # wasn't given. Skip just this mode rather than aborting.
                log.warning('(%s) Error while parsing mode %r: mode requires an '
                            'argument but none was found. (modestring: %r)',
                            self.name, mode, modestring)
                continue  # Skip this mode; don't error out completely.
            newmode = (prefix + mode, arg)
            res.append(newmode)

            # Tentatively apply the new mode to the "existing" mode list.
            existing = self._apply_modes(existing, [newmode], is_channel=is_channel)

    return res
|
2016-01-10 04:38:27 +01:00
|
|
|
|
2018-02-11 02:28:04 +01:00
|
|
|
def parse_modes(self, target, args):
    """Parses a modestring list into a list of (mode, argument) tuples.

    ['+mitl-o', '3', 'person'] => [('+m', None), ('+i', None), ('+t', None), ('+l', '3'), ('-o', 'person')]
    """
    # Mode types per http://www.irc.org/tech_docs/005.html:
    #   A = adds/removes a nick or address to a list; always has a parameter.
    #   B = changes a setting and always has a parameter.
    #   C = changes a setting and only has a parameter when set.
    #   D = changes a setting and never has a parameter.

    is_channel = self.is_channel(target)
    if is_channel:
        log.debug('(%s) Using self.cmodes for this query: %s', self.name, self.cmodes)

        supported_modes = self.cmodes
        chanobj = self._channels[target]
        oldmodes = chanobj.modes
        prefixmodes = chanobj.prefixmodes
    else:
        log.debug('(%s) Using self.umodes for this query: %s', self.name, self.umodes)

        if target not in self.users:
            log.debug('(%s) Possible desync! Mode target %s is not in the users index.', self.name, target)
            return []  # Return an empty mode list

        supported_modes = self.umodes
        oldmodes = self.users[target].modes
        prefixmodes = None

    return self._parse_modes(args, oldmodes, supported_modes, is_channel=is_channel,
                             prefixmodes=prefixmodes)
|
2018-02-11 02:28:04 +01:00
|
|
|
|
2018-03-03 05:43:05 +01:00
|
|
|
def _apply_modes(self, old_modelist, changedmodes, is_channel=False,
                 prefixmodes=None):
    """
    Takes a list of parsed IRC modes, and applies them onto the given target mode list.

    Returns the new mode set; `prefixmodes` (a channel's name -> user-set
    mapping) is updated in place when given.
    """
    modelist = set(old_modelist)

    if is_channel:
        supported_modes = self.cmodes
    else:
        supported_modes = self.umodes

    for mode in changedmodes:
        # Chop off the +/- part that parse_modes gives; it's meaningless for a mode list.
        try:
            real_mode = (mode[0][1], mode[1])
        except IndexError:
            # Unprefixed single-char mode; use it as-is.
            real_mode = mode

        if is_channel:
            if prefixmodes is not None:
                # We only handle +qaohv for now. Iterate over every supported mode:
                # if the IRCd supports this mode and it is the one being set, add/remove
                # the person from the corresponding prefix mode list (e.g. c.prefixmodes['op']
                # for ops).
                for pmode, pmodelist in prefixmodes.items():
                    if pmode in supported_modes and real_mode[0] == supported_modes[pmode]:
                        log.debug('(%s) Initial prefixmodes list (%s): %s', self.name, pmode, pmodelist)
                        if mode[0][0] == '+':
                            pmodelist.add(mode[1])
                        else:
                            pmodelist.discard(mode[1])

                        log.debug('(%s) Final prefixmodes list (%s): %s', self.name, pmode, pmodelist)

            if real_mode[0] in self.prefixmodes:
                # Don't add prefix modes to Channel.modes; they belong in the
                # prefixmodes mapping handled above.
                log.debug('(%s) Not adding mode %s to Channel.modes because '
                          'it\'s a prefix mode.', self.name, str(mode))
                continue

        if mode[0][0] != '-':
            log.debug('(%s) Adding mode %r on %s', self.name, real_mode, modelist)
            # We're adding a mode
            existing = [m for m in modelist if m[0] == real_mode[0] and m[1] != real_mode[1]]
            if existing and real_mode[1] and real_mode[0] not in supported_modes['*A']:
                # The mode we're setting takes a parameter, but is not a list mode (like +beI).
                # Therefore, only one version of it can exist at a time, and we must remove
                # any old modepairs using the same letter. Otherwise, we'll get duplicates when,
                # for example, someone sets mode "+l 30" on a channel already set "+l 25".
                log.debug('(%s) Old modes for mode %r exist in %s, removing them: %s',
                          self.name, real_mode, modelist, str(existing))
                [modelist.discard(oldmode) for oldmode in existing]
            modelist.add(real_mode)
        else:
            log.debug('(%s) Removing mode %r from %s', self.name, real_mode, modelist)
            # We're removing a mode
            if real_mode[1] is None:
                # We're removing a mode that only takes arguments when setting.
                # Remove all mode entries that use the same letter as the one
                # we're unsetting.
                for oldmode in modelist.copy():
                    if oldmode[0] == real_mode[0]:
                        modelist.discard(oldmode)
            else:
                modelist.discard(real_mode)
    log.debug('(%s) Final modelist: %s', self.name, modelist)
    return modelist
|
|
|
|
|
|
|
|
def apply_modes(self, target, changedmodes):
|
|
|
|
"""Takes a list of parsed IRC modes, and applies them on the given target.
|
|
|
|
|
|
|
|
The target can be either a channel or a user; this is handled automatically."""
|
|
|
|
is_channel = self.is_channel(target)
|
|
|
|
|
|
|
|
prefixmodes = None
|
2017-06-16 06:31:03 +02:00
|
|
|
try:
|
2018-03-03 05:43:05 +01:00
|
|
|
if is_channel:
|
|
|
|
c = self._channels[target]
|
|
|
|
old_modelist = c.modes
|
|
|
|
prefixmodes = c.prefixmodes
|
2017-06-16 06:31:03 +02:00
|
|
|
else:
|
2018-03-03 05:43:05 +01:00
|
|
|
old_modelist = self.users[target].modes
|
|
|
|
except KeyError:
|
|
|
|
log.warning('(%s) Possible desync? Mode target %s is unknown.', self.name, target)
|
|
|
|
return
|
|
|
|
|
|
|
|
modelist = self._apply_modes(old_modelist, changedmodes, is_channel=is_channel,
|
|
|
|
prefixmodes=prefixmodes)
|
|
|
|
|
|
|
|
try:
|
|
|
|
if is_channel:
|
2017-08-25 11:11:48 +02:00
|
|
|
self._channels[target].modes = modelist
|
2018-03-03 05:43:05 +01:00
|
|
|
else:
|
|
|
|
self.users[target].modes = modelist
|
2017-06-16 06:31:03 +02:00
|
|
|
except KeyError:
|
2018-03-03 05:43:05 +01:00
|
|
|
log.warning("(%s) Invalid MODE target %s (is_channel=%s)", self.name, target, is_channel)
|
2017-02-26 07:06:43 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
@staticmethod
|
|
|
|
def _flip(mode):
|
|
|
|
"""Flips a mode character."""
|
2017-11-04 07:40:11 +01:00
|
|
|
# Make it a list first; strings don't support item assignment
|
2017-06-16 06:31:03 +02:00
|
|
|
mode = list(mode)
|
|
|
|
if mode[0] == '-': # Query is something like "-n"
|
|
|
|
mode[0] = '+' # Change it to "+n"
|
|
|
|
elif mode[0] == '+':
|
|
|
|
mode[0] = '-'
|
|
|
|
else: # No prefix given, assume +
|
|
|
|
mode.insert(0, '-')
|
|
|
|
return ''.join(mode)
|
2016-06-15 19:55:47 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def reverse_modes(self, target, modes, oldobj=None):
    """Reverses/inverts the mode string or mode list given.

    Optionally, an oldobj argument can be given to look at an earlier state of
    a channel/user object, e.g. for checking the op status of a mode setter
    before their modes are processed and added to the channel state.

    This function allows both mode strings or mode lists. Example uses:
        "+mi-lk test => "-mi+lk test"
        "mi-k test => "-mi+k test"
        [('+m', None), ('+r', None), ('+l', '3'), ('-o', 'person')
            => {('-m', None), ('-r', None), ('-l', None), ('+o', 'person')})
        {('s', None), ('+o', 'whoever') => {('-s', None), ('-o', 'whoever')})
    """
    origstring = isinstance(modes, str)

    # If the query is a string, we have to parse it first.
    if origstring:
        modes = self.parse_modes(target, modes.split(" "))
    # Get the current mode list first.
    if self.is_channel(target):
        c = oldobj or self._channels[target]
        # Copy so the prefix-mode merge below doesn't mutate channel state.
        oldmodes = c.modes.copy()
        possible_modes = self.cmodes.copy()
        # For channels, this also includes the list of prefix modes.
        possible_modes['*A'] += ''.join(self.prefixmodes)
        for name, userlist in c.prefixmodes.items():
            try:
                oldmodes.update([(self.cmodes[name], u) for u in userlist])
            except KeyError:
                # Prefix mode not supported by this IRCd's cmodes; skip it.
                continue
    else:
        oldmodes = self.users[target].modes
        possible_modes = self.umodes
    newmodes = []
    log.debug('(%s) reverse_modes: old/current mode list for %s is: %s', self.name,
              target, oldmodes)
    for char, arg in modes:
        # Mode types:
        # A = Mode that adds or removes a nick or address to a list. Always has a parameter.
        # B = Mode that changes a setting and always has a parameter.
        # C = Mode that changes a setting and only has a parameter when set.
        # D = Mode that changes a setting and never has a parameter.
        mchar = char[-1]
        if mchar in possible_modes['*B'] + possible_modes['*C']:
            # We need to look at the current mode list to reset modes that take arguments
            # For example, trying to bounce +l 30 on a channel that had +l 50 set should
            # give "+l 50" and not "-l".
            oldarg = [m for m in oldmodes if m[0] == mchar]
            if oldarg:  # Old mode argument for this mode existed, use that.
                oldarg = oldarg[0]
                mpair = ('+%s' % oldarg[0], oldarg[1])
            else:  # Not found, flip the mode then.
                # Mode takes no arguments when unsetting.
                if mchar in possible_modes['*C'] and char[0] != '-':
                    arg = None
                mpair = (self._flip(char), arg)
        else:
            mpair = (self._flip(char), arg)
        if char[0] != '-' and (mchar, arg) in oldmodes:
            # Mode is already set.
            log.debug("(%s) reverse_modes: skipping reversing '%s %s' with %s since we're "
                      "setting a mode that's already set.", self.name, char, arg, mpair)
            continue
        elif char[0] == '-' and (mchar, arg) not in oldmodes and mchar in possible_modes['*A']:
            # We're unsetting a prefix mode that was never set - don't set it in response!
            # TS6 IRCds lacks server-side verification for this and can cause annoying mode floods.
            log.debug("(%s) reverse_modes: skipping reversing '%s %s' with %s since it "
                      "wasn't previously set.", self.name, char, arg, mpair)
            continue
        newmodes.append(mpair)

    log.debug('(%s) reverse_modes: new modes: %s', self.name, newmodes)
    if origstring:
        # If the original query is a string, send it back as a string.
        return self.join_modes(newmodes)
    else:
        return set(newmodes)
|
2015-09-19 19:31:43 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
@staticmethod
|
|
|
|
def join_modes(modes, sort=False):
|
|
|
|
"""Takes a list of (mode, arg) tuples in parse_modes() format, and
|
|
|
|
joins them into a string.
|
2015-08-26 05:37:15 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
See testJoinModes in tests/test_utils.py for some examples."""
|
|
|
|
prefix = '+' # Assume we're adding modes unless told otherwise
|
|
|
|
modelist = ''
|
|
|
|
args = []
|
2015-12-22 19:46:34 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
# Sort modes alphabetically like a conventional IRCd.
|
|
|
|
if sort:
|
|
|
|
modes = sorted(modes)
|
2015-12-27 01:43:40 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
for modepair in modes:
|
|
|
|
mode, arg = modepair
|
|
|
|
assert len(mode) in (1, 2), "Incorrect length of a mode (received %r)" % mode
|
|
|
|
try:
|
|
|
|
# If the mode has a prefix, use that.
|
|
|
|
curr_prefix, mode = mode
|
|
|
|
except ValueError:
|
2017-11-04 07:40:11 +01:00
|
|
|
# If not, the current prefix stays the same as the last mode pair; move on
|
|
|
|
# to the next one.
|
2017-06-16 06:31:03 +02:00
|
|
|
pass
|
|
|
|
else:
|
2017-11-04 07:40:11 +01:00
|
|
|
# Only when the prefix of this mode isn't the same as the last one do we add
|
|
|
|
# the prefix to the mode string. This prevents '+nt-lk' from turning
|
2017-06-16 06:31:03 +02:00
|
|
|
# into '+n+t-l-k' or '+ntlk'.
|
|
|
|
if prefix != curr_prefix:
|
|
|
|
modelist += curr_prefix
|
|
|
|
prefix = curr_prefix
|
|
|
|
modelist += mode
|
|
|
|
if arg is not None:
|
|
|
|
args.append(arg)
|
|
|
|
if not modelist.startswith(('+', '-')):
|
|
|
|
# Our starting mode didn't have a prefix with it. Assume '+'.
|
|
|
|
modelist = '+' + modelist
|
|
|
|
if args:
|
|
|
|
# Add the args if there are any.
|
|
|
|
modelist += ' %s' % ' '.join(args)
|
|
|
|
return modelist
|
2015-12-27 01:43:40 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
@classmethod
def wrap_modes(cls, modes, limit, max_modes_per_msg=0):
    """
    Takes a list of modes and wraps it across multiple lines.

    Returns a list of joined mode strings, each at most `limit` characters;
    `max_modes_per_msg`, when nonzero, also caps the modes per string.
    """
    strings = []

    # This process is slightly trickier than just wrapping arguments, because modes create
    # positional arguments that can't be separated from its character.
    queued_modes = []
    total_length = 0

    last_prefix = '+'
    orig_modes = modes.copy()
    modes = list(modes)
    while modes:
        # PyLink mode lists come in the form [('+t', None), ('-b', '*!*@someone'), ('+l', 3)]
        # The +/- part is optional and is treated as the prefix of the last mode if not given,
        # or + (adding modes) if it is the first mode in the list.
        next_mode = modes.pop(0)

        modechar, arg = next_mode
        prefix = modechar[0]
        if prefix not in '+-':
            prefix = last_prefix
            # Explicitly add the prefix to the mode character to prevent
            # ambiguity when passing it to e.g. join_modes().
            modechar = prefix + modechar
            # XXX: because tuples are immutable, we have to replace the entire modepair...
            next_mode = (modechar, arg)

        # Figure out the length that the next mode will add to the buffer. If we're changing
        # from + to - (setting to removing modes) or vice versa, we'll need two characters:
        # the "+" or "-" as well as the actual mode char.
        next_length = 1
        if prefix != last_prefix:
            next_length += 1

        # Replace the last mode prefix with the current one for the next iteration.
        last_prefix = prefix

        if arg:
            # This mode has an argument, so add the length of that and a space.
            next_length += 1
            next_length += len(arg)

        assert next_length <= limit, \
            "wrap_modes: Mode %s is too long for the given length %s" % (next_mode, limit)

        # Check both message length and max. modes per msg if enabled.
        if (next_length + total_length) <= limit and ((not max_modes_per_msg) or len(queued_modes) < max_modes_per_msg):
            # We can fit this mode in the next message; add it.
            total_length += next_length
            log.debug('wrap_modes: Adding mode %s to queued modes', str(next_mode))
            queued_modes.append(next_mode)
            log.debug('wrap_modes: queued modes: %s', queued_modes)
        else:
            # Otherwise, create a new message by joining the previous queued modes into a message.
            # Then, create a new message with our current mode.
            strings.append(cls.join_modes(queued_modes))
            queued_modes.clear()

            log.debug('wrap_modes: cleared queue (length %s) and now adding %s', limit, str(next_mode))
            queued_modes.append(next_mode)
            total_length = next_length
    else:
        # while/else: there is no `break` above, so this always runs once the
        # input is drained — flushing whatever modes remain into a final string.
        # Everything fit in one line, so just use that.
        strings.append(cls.join_modes(queued_modes))

    log.debug('wrap_modes: returning %s for %s', strings, orig_modes)
    return strings
|
2016-01-10 04:38:27 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def get_hostmask(self, user, realhost=False, ip=False):
|
|
|
|
"""
|
|
|
|
Returns the hostmask of the given user, if present. If the realhost option
|
|
|
|
is given, return the real host of the user instead of the displayed host.
|
|
|
|
If the ip option is given, return the IP address of the user (this overrides
|
|
|
|
realhost)."""
|
|
|
|
userobj = self.users.get(user)
|
2016-01-01 02:28:47 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
try:
|
|
|
|
nick = userobj.nick
|
|
|
|
except AttributeError:
|
|
|
|
nick = '<unknown-nick>'
|
2016-08-27 18:50:53 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
try:
|
|
|
|
ident = userobj.ident
|
|
|
|
except AttributeError:
|
|
|
|
ident = '<unknown-ident>'
|
2016-08-27 18:50:53 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
try:
|
|
|
|
if ip:
|
|
|
|
host = userobj.ip
|
|
|
|
elif realhost:
|
|
|
|
host = userobj.realhost
|
|
|
|
else:
|
|
|
|
host = userobj.host
|
|
|
|
except AttributeError:
|
|
|
|
host = '<unknown-host>'
|
2016-01-01 02:28:47 +01:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
return '%s!%s@%s' % (nick, ident, host)
|
2016-09-01 03:28:13 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def get_friendly_name(self, entityid):
|
2017-04-01 01:25:28 +02:00
|
|
|
"""
|
2018-05-11 23:40:24 +02:00
|
|
|
Returns the friendly name of a SID (the server name), UID (the nick), or channel (returned as-is).
|
2017-04-01 01:25:28 +02:00
|
|
|
"""
|
2017-06-16 06:31:03 +02:00
|
|
|
if entityid in self.servers:
|
|
|
|
return self.servers[entityid].name
|
|
|
|
elif entityid in self.users:
|
|
|
|
return self.users[entityid].nick
|
2018-05-11 23:40:24 +02:00
|
|
|
elif self.is_channel(entityid):
|
|
|
|
# We assume that channels don't conflict with the SID/UID format. For IRC, this is a
|
|
|
|
# relatively safe bet.
|
|
|
|
return entityid
|
2017-04-01 01:25:28 +02:00
|
|
|
else:
|
2017-06-16 06:31:03 +02:00
|
|
|
raise KeyError("Unknown UID/SID %s" % entityid)
|
2016-07-01 03:22:45 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def is_oper(self, uid, allowAuthed=True, allowOper=True):
|
2017-04-01 01:25:28 +02:00
|
|
|
"""
|
2017-06-16 06:31:03 +02:00
|
|
|
Returns whether the given user has operator status on PyLink. This can be achieved
|
|
|
|
by either identifying to PyLink as admin (if allowAuthed is True),
|
|
|
|
or having user mode +o set (if allowOper is True). At least one of
|
|
|
|
allowAuthed or allowOper must be True for this to give any meaningful
|
|
|
|
results.
|
2017-04-01 01:25:28 +02:00
|
|
|
"""
|
2017-06-16 06:31:03 +02:00
|
|
|
if uid in self.users:
|
|
|
|
if allowOper and ("o", None) in self.users[uid].modes:
|
|
|
|
return True
|
|
|
|
elif allowAuthed and self.users[uid].account:
|
|
|
|
return True
|
|
|
|
return False
|
2016-05-01 01:57:38 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def check_authenticated(self, uid, allowAuthed=True, allowOper=True):
    """
    Checks whether the given user has operator status on PyLink, raising
    NotAuthorizedError and logging the access denial if not.

    Deprecated since PyLink 1.2; use the Permissions API instead.
    """
    # Fix: "relase" -> "release" typo in the user-facing deprecation warning.
    log.warning("(%s) check_authenticated() is deprecated as of PyLink 1.2 and may be "
                "removed in a future release. Consider migrating to the PyLink Permissions API.",
                self.name)
    # Name of the calling function, for the access-denied log line.
    lastfunc = inspect.stack()[1][3]
    if not self.is_oper(uid, allowAuthed=allowAuthed, allowOper=allowOper):
        log.warning('(%s) Access denied for %s calling %r', self.name,
                    self.get_hostmask(uid), lastfunc)
        raise utils.NotAuthorizedError("You are not authenticated!")
    return True
|
2016-06-25 22:08:49 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def match_host(self, glob, target, ip=True, realhost=True):
    """
    Checks whether the given host, or given UID's hostmask matches the given nick!user@host
    glob.

    If the target given is a UID, and the 'ip' or 'realhost' options are True, this will also
    match against the target's IP address and real host, respectively.

    This function respects IRC casemappings (rfc1459 and ascii). If the given target is a UID,
    and the 'ip' option is enabled, the host portion of the glob is also matched as a CIDR
    range.
    """
    # Get the corresponding casemapping value used by ircmatch.
    if self.casemapping == 'rfc1459':
        casemapping = 0
    else:
        casemapping = 1

    # Try to convert target into a UID. If this fails, it's probably a hostname.
    target = self.nick_to_uid(target) or target

    # Allow queries like !$exttarget to invert the given match.
    invert = glob.startswith('!')
    if invert:
        glob = glob.lstrip('!')

    def match_host_core():
        """
        Core processor for match_host(), minus the inversion check.
        """
        # Work with variables in the match_host() scope, from
        # http://stackoverflow.com/a/8178808
        nonlocal glob

        # Prepare a list of hosts to check against.
        if target in self.users:

            if not self.is_hostmask(glob):
                for specialchar in '$:()':
                    # XXX: we should probably add proper rules on what's a valid account name
                    if specialchar in glob:
                        break
                else:
                    # Implicitly convert matches for *sane* account names to "$pylinkacc:accountname".
                    log.debug('(%s) Using target $pylinkacc:%s instead of raw string %r', self.name, glob, glob)
                    glob = '$pylinkacc:' + glob

            if glob.startswith('$'):
                # Exttargets start with $. Skip regular ban matching and find the matching ban handler.
                glob = glob.lstrip('$')
                exttargetname = glob.split(':', 1)[0]
                handler = world.exttarget_handlers.get(exttargetname)

                if handler:
                    # Handler exists. Return what it finds.
                    result = handler(self, glob, target)
                    log.debug('(%s) Got %s from exttarget %s in match_host() glob $%s for target %s',
                              self.name, result, exttargetname, glob, target)
                    return result
                else:
                    # Unknown exttarget name: treat as no match.
                    log.debug('(%s) Unknown exttarget %s in match_host() glob $%s', self.name,
                              exttargetname, glob)
                    return False

            # Start from the displayed hostmask.
            hosts = {self.get_hostmask(target)}

            if ip:
                hosts.add(self.get_hostmask(target, ip=True))

                # HACK: support CIDR hosts in the hosts portion
                try:
                    header, cidrtarget = glob.split('@', 1)
                    # Try to parse the host portion as a CIDR range
                    network = ipaddress.ip_network(cidrtarget)

                    real_ip = self.users[target].ip
                    if ipaddress.ip_address(real_ip) in network:
                        # If the CIDR matches, hack around the host matcher by pretending that
                        # the lookup target was the IP and not the CIDR range!
                        glob = '@'.join((header, real_ip))
                        log.debug('(%s) Found matching CIDR %s for %s, replacing target glob with IP %s', self.name,
                                  cidrtarget, target, real_ip)
                except ValueError:
                    # Not a valid x@y glob or not a valid CIDR; fall through to
                    # plain glob matching.
                    pass

            if realhost:
                hosts.add(self.get_hostmask(target, realhost=True))

        else:  # We were given a host, use that.
            hosts = [target]

        # Iterate over the hosts to match using ircmatch.
        for host in hosts:
            if ircmatch.match(casemapping, glob, host):
                return True

        return False

    result = match_host_core()
    if invert:
        result = not result
    return result
|
2016-05-01 01:33:46 +02:00
|
|
|
|
2017-08-07 02:55:43 +02:00
|
|
|
def match_all(self, banmask, channel=None):
|
|
|
|
"""
|
|
|
|
Returns all users matching the target hostmask/exttarget. Users can also be filtered by channel.
|
|
|
|
"""
|
|
|
|
if channel:
|
|
|
|
banmask = "$and:(%s+$channel:%s)" % (banmask, channel)
|
|
|
|
|
|
|
|
for uid, userobj in self.users.copy().items():
|
|
|
|
if self.match_host(banmask, uid) and uid in self.users:
|
|
|
|
yield uid
|
|
|
|
|
2017-08-07 05:02:09 +02:00
|
|
|
def match_all_re(self, re_mask, channel=None):
|
|
|
|
"""
|
|
|
|
Returns all users whose "nick!user@host [gecos]" mask matches the given regular expression. Users can also be filtered by channel.
|
|
|
|
"""
|
|
|
|
regexp = re.compile(re_mask)
|
|
|
|
for uid, userobj in self.users.copy().items():
|
|
|
|
target = '%s [%s]' % (self.get_hostmask(uid), userobj.realname)
|
|
|
|
if regexp.fullmatch(target) and ((not channel) or channel in userobj.channels):
|
|
|
|
yield uid
|
|
|
|
|
2017-08-07 04:21:55 +02:00
|
|
|
def make_channel_ban(self, uid, ban_type='ban'):
|
|
|
|
"""Creates a hostmask-based ban for the given user.
|
|
|
|
|
|
|
|
Ban exceptions, invite exceptions quiets, and extbans are also supported by setting ban_type
|
|
|
|
to the appropriate PyLink named mode (e.g. "ban", "banexception", "invex", "quiet", "ban_nonick")."""
|
|
|
|
assert uid in self.users, "Unknown user %s" % uid
|
|
|
|
|
|
|
|
# FIXME: verify that this is a valid mask.
|
|
|
|
# XXX: support slicing hosts so things like *!ident@*.isp.net are possible. This is actually
|
|
|
|
# more annoying to do than it appears because of vHosts using /, IPv6 addresses
|
|
|
|
# (cloaked and uncloaked), etc.
|
|
|
|
ban_style = self.serverdata.get('ban_style') or conf.conf['pylink'].get('ban_style') or \
|
|
|
|
'*!*@$host'
|
|
|
|
|
|
|
|
template = string.Template(ban_style)
|
|
|
|
banhost = template.safe_substitute(ban_style, **self.users[uid].__dict__)
|
2017-08-29 05:13:25 +02:00
|
|
|
assert self.is_hostmask(banhost), "Ban mask %r is not a valid hostmask!" % banhost
|
2017-08-07 04:21:55 +02:00
|
|
|
|
|
|
|
if ban_type in self.cmodes:
|
|
|
|
return ('+%s' % self.cmodes[ban_type], banhost)
|
|
|
|
elif ban_type in self.extbans_acting: # Handle extbans, which are generally "+b prefix:banmask"
|
|
|
|
return ('+%s' % self.cmodes['ban'], self.extbans_acting[ban_type]+banhost)
|
|
|
|
else:
|
|
|
|
raise ValueError("ban_type %r is not available on IRCd %r" % (ban_type, self.protoname))
|
|
|
|
|
2017-06-25 08:27:24 +02:00
|
|
|
    def updateTS(self, sender, channel, their_ts, modes=None):
        """
        Merges modes of a channel given the remote TS and a list of modes.

        sender: UID/SID that originated the mode change (used to decide whether
                the change came from us or from the uplink).
        channel: channel name to merge TS/modes on.
        their_ts: the remote side's channel timestamp (int).
        modes: optional list of modes to apply if the TS comparison allows it.
        """

        # Okay, so the situation is that we have 6 possible TS/sender combinations:

        #                       | our TS lower | TS equal | their TS lower
        # mode origin is us     |   OVERWRITE  |   MERGE  |    IGNORE
        # mode origin is uplink |    IGNORE    |   MERGE  |   OVERWRITE

        if modes is None:
            modes = []

        def _clear():
            # Drop all local channel modes and all prefix modes held by
            # non-internal clients; internal (PyLink-owned) clients keep theirs.
            log.debug("(%s) Clearing local modes from channel %s due to TS change", self.name,
                      channel)
            self._channels[channel].modes.clear()
            for p in self._channels[channel].prefixmodes.values():
                # Iterate over a copy since we mutate the set while looping.
                for user in p.copy():
                    if not self.is_internal_client(user):
                        p.discard(user)

        def _apply():
            # Apply the incoming modes, if any were given.
            if modes:
                log.debug("(%s) Applying modes on channel %s (TS ok)", self.name,
                          channel)
                self.apply_modes(channel, modes)

        # Use a lock so only one thread can change a channel's TS at once: this prevents race
        # conditions that would otherwise desync channel modes.
        with self._ts_lock:
            our_ts = self._channels[channel].ts
            assert isinstance(our_ts, int), "Wrong type for our_ts (expected int, got %s)" % type(our_ts)
            assert isinstance(their_ts, int), "Wrong type for their_ts (expected int, got %s)" % type(their_ts)

            # Check if we're the mode sender based on the UID / SID given.
            our_mode = self.is_internal_client(sender) or self.is_internal_server(sender)

            log.debug("(%s/%s) our_ts: %s; their_ts: %s; is the mode origin us? %s", self.name,
                      channel, our_ts, their_ts, our_mode)

            if their_ts == our_ts:
                log.debug("(%s/%s) remote TS of %s is equal to our %s; mode query %s",
                          self.name, channel, their_ts, our_ts, modes)
                # Their TS is equal to ours. Merge modes.
                _apply()

            elif (their_ts < our_ts):
                # Remote TS is lower: the remote side wins. Adopt their TS unless
                # it looks bogus (sanity threshold guards against TS of ~0).
                if their_ts < 750000:
                    log.warning('(%s) Possible desync? Not setting bogus TS %s on channel %s', self.name, their_ts, channel)
                else:
                    log.debug('(%s) Resetting channel TS of %s from %s to %s (remote has lower TS)',
                              self.name, channel, our_ts, their_ts)
                    self._channels[channel].ts = their_ts

                # Remote TS was lower and we're receiving modes. Clear the modelist and apply theirs.

                _clear()
                _apply()
|
|
|
|
|
2017-07-31 14:58:02 +02:00
|
|
|
def _check_nick_collision(self, nick):
|
|
|
|
"""
|
|
|
|
Nick collision checker.
|
|
|
|
"""
|
|
|
|
uid = self.nick_to_uid(nick)
|
|
|
|
# If there is a nick collision, we simply alert plugins. Relay will purposely try to
|
|
|
|
# lose fights and tag nicks instead, while other plugins can choose how to handle this.
|
|
|
|
if uid:
|
|
|
|
log.info('(%s) Nick collision on %s/%s, forwarding this to plugins', self.name,
|
|
|
|
uid, nick)
|
|
|
|
self.call_hooks([self.sid, 'SAVE', {'target': uid}])
|
|
|
|
|
2017-08-30 09:56:18 +02:00
|
|
|
def _expandPUID(self, uid):
|
|
|
|
"""
|
|
|
|
Returns the nick or server name for the given UID/SID. This method helps support protocol
|
|
|
|
modules that use PUIDs internally, as they must convert them to talk with the uplink.
|
|
|
|
"""
|
2018-02-24 20:18:18 +01:00
|
|
|
log.debug('(%s) _expandPUID: got uid %s', self.name, uid)
|
2017-08-30 09:56:18 +02:00
|
|
|
# TODO: stop hardcoding @ as separator
|
2018-02-24 20:18:18 +01:00
|
|
|
if uid and '@' in uid:
|
2017-08-30 09:56:18 +02:00
|
|
|
if uid in self.users:
|
|
|
|
# UID exists and has a @ in it, meaning it's a PUID (orignick@counter style).
|
|
|
|
# Return this user's nick accordingly.
|
|
|
|
nick = self.users[uid].nick
|
|
|
|
log.debug('(%s) Mangling target PUID %s to nick %s', self.name, uid, nick)
|
|
|
|
return nick
|
|
|
|
elif uid in self.servers:
|
|
|
|
# Ditto for servers
|
|
|
|
sname = self.servers[uid].name
|
|
|
|
log.debug('(%s) Mangling target PSID %s to server name %s', self.name, uid, sname)
|
|
|
|
return sname
|
|
|
|
return uid # Regular UID, no change
|
|
|
|
|
2018-05-11 23:38:21 +02:00
|
|
|
def wrap_message(self, source, target, command, text):
|
|
|
|
"""
|
|
|
|
Wraps the given message text into multiple lines (length depends on how much the protocol
|
|
|
|
allows), and returns these as a list.
|
|
|
|
"""
|
|
|
|
# This is protocol specific, so stub it here in the base class.
|
|
|
|
raise NotImplementedError
|
|
|
|
|
2017-08-29 05:01:28 +02:00
|
|
|
utils._proto_utils_class = PyLinkNetworkCoreWithUtils # Used by compatibility wrappers
|
|
|
|
|
2017-06-16 06:55:08 +02:00
|
|
|
class IRCNetwork(PyLinkNetworkCoreWithUtils):
|
2017-07-08 05:13:52 +02:00
|
|
|
S2S_BUFSIZE = 510
|
|
|
|
|
2017-06-28 00:58:38 +02:00
|
|
|
def __init__(self, *args, **kwargs):
|
|
|
|
super().__init__(*args, **kwargs)
|
|
|
|
|
2017-07-13 07:56:30 +02:00
|
|
|
self._queue = None
|
|
|
|
self._ping_timer = None
|
|
|
|
self._socket = None
|
2018-03-17 19:01:32 +01:00
|
|
|
self._selector_key = None
|
2018-03-17 20:18:16 +01:00
|
|
|
self._buffer = b''
|
2018-03-23 01:42:28 +01:00
|
|
|
self._reconnect_thread = None
|
2018-04-08 06:46:05 +02:00
|
|
|
self._queue_thread = None
|
2017-06-28 00:58:38 +02:00
|
|
|
|
2017-07-14 14:22:05 +02:00
|
|
|
def _init_vars(self, *args, **kwargs):
|
|
|
|
super()._init_vars(*args, **kwargs)
|
2017-06-28 01:05:46 +02:00
|
|
|
|
|
|
|
# Set IRC specific variables for ping checking and queuing
|
|
|
|
self.lastping = time.time()
|
|
|
|
self.pingfreq = self.serverdata.get('pingfreq') or 90
|
|
|
|
self.pingtimeout = self.pingfreq * 3
|
|
|
|
|
|
|
|
self.maxsendq = self.serverdata.get('maxsendq', 4096)
|
2017-07-13 07:56:30 +02:00
|
|
|
self._queue = queue.Queue(self.maxsendq)
|
2017-06-28 01:05:46 +02:00
|
|
|
|
2017-06-27 11:53:09 +02:00
|
|
|
def _schedule_ping(self):
|
2017-06-16 06:31:03 +02:00
|
|
|
"""Schedules periodic pings in a loop."""
|
2017-07-05 07:09:50 +02:00
|
|
|
self._ping_uplink()
|
2016-05-01 01:33:46 +02:00
|
|
|
|
2018-04-08 06:46:05 +02:00
|
|
|
if self._aborted.is_set():
|
|
|
|
return
|
|
|
|
|
2017-07-13 07:56:30 +02:00
|
|
|
self._ping_timer = threading.Timer(self.pingfreq, self._schedule_ping)
|
|
|
|
self._ping_timer.daemon = True
|
|
|
|
self._ping_timer.name = 'Ping timer loop for %s' % self.name
|
|
|
|
self._ping_timer.start()
|
2016-07-01 04:52:06 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
log.debug('(%s) Ping scheduled at %s', self.name, time.time())
|
|
|
|
|
2017-07-13 07:02:40 +02:00
|
|
|
def _log_connection_error(self, *args, **kwargs):
|
|
|
|
# Log connection errors to ERROR unless were shutting down (in which case,
|
|
|
|
# the given text goes to DEBUG).
|
2017-08-31 22:36:46 +02:00
|
|
|
if self._aborted.is_set() or world.shutting_down.is_set():
|
2017-07-13 07:02:40 +02:00
|
|
|
log.debug(*args, **kwargs)
|
|
|
|
else:
|
|
|
|
log.error(*args, **kwargs)
|
|
|
|
|
2018-04-08 06:22:18 +02:00
|
|
|
def _connect(self):
|
2017-06-16 06:31:03 +02:00
|
|
|
"""
|
2018-03-17 19:01:32 +01:00
|
|
|
Connects to the network.
|
2017-06-16 06:31:03 +02:00
|
|
|
"""
|
2018-03-17 19:01:32 +01:00
|
|
|
self._pre_connect()
|
2017-01-02 21:08:22 +01:00
|
|
|
|
2018-03-17 19:01:32 +01:00
|
|
|
ip = self.serverdata["ip"]
|
|
|
|
port = self.serverdata["port"]
|
|
|
|
checks_ok = True
|
|
|
|
try:
|
|
|
|
# Set the socket type (IPv6 or IPv4).
|
|
|
|
stype = socket.AF_INET6 if self.serverdata.get("ipv6") else socket.AF_INET
|
|
|
|
|
|
|
|
# Creat the socket.
|
|
|
|
self._socket = socket.socket(stype)
|
|
|
|
|
|
|
|
# Set the socket bind if applicable.
|
|
|
|
if 'bindhost' in self.serverdata:
|
|
|
|
self._socket.bind((self.serverdata['bindhost'], 0))
|
|
|
|
|
|
|
|
# Resolve hostnames if it's not an IP address already.
|
|
|
|
old_ip = ip
|
|
|
|
ip = socket.getaddrinfo(ip, port, stype)[0][-1][0]
|
|
|
|
log.debug('(%s) Resolving address %s to %s', self.name, old_ip, ip)
|
|
|
|
|
|
|
|
# Enable SSL if set to do so.
|
|
|
|
self.ssl = self.serverdata.get('ssl')
|
|
|
|
if self.ssl:
|
|
|
|
log.info('(%s) Attempting SSL for this connection...', self.name)
|
|
|
|
certfile = self.serverdata.get('ssl_certfile')
|
|
|
|
keyfile = self.serverdata.get('ssl_keyfile')
|
|
|
|
|
|
|
|
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
|
|
|
|
# Disable SSLv2 and SSLv3 - these are insecure
|
|
|
|
context.options |= ssl.OP_NO_SSLv2
|
|
|
|
context.options |= ssl.OP_NO_SSLv3
|
|
|
|
|
|
|
|
# Cert and key files are optional, load them if specified.
|
|
|
|
if certfile and keyfile:
|
2017-06-16 06:31:03 +02:00
|
|
|
try:
|
2018-03-17 19:01:32 +01:00
|
|
|
context.load_cert_chain(certfile, keyfile)
|
|
|
|
except OSError:
|
|
|
|
log.exception('(%s) Caught OSError trying to '
|
|
|
|
'initialize the SSL connection; '
|
|
|
|
'are "ssl_certfile" and '
|
|
|
|
'"ssl_keyfile" set correctly?',
|
|
|
|
self.name)
|
|
|
|
checks_ok = False
|
|
|
|
|
|
|
|
self._socket = context.wrap_socket(self._socket)
|
|
|
|
|
|
|
|
log.info("Connecting to network %r on %s:%s", self.name, ip, port)
|
2018-03-24 04:21:49 +01:00
|
|
|
|
2018-04-08 06:22:18 +02:00
|
|
|
# Use a lower timeout for the initial connect.
|
|
|
|
self._socket.settimeout(self.pingfreq)
|
|
|
|
|
2018-03-24 04:21:49 +01:00
|
|
|
try:
|
|
|
|
self._socket.connect((ip, port))
|
|
|
|
except (ssl.SSLError, OSError):
|
2018-04-08 06:46:05 +02:00
|
|
|
if world.shutting_down.is_set():
|
|
|
|
return
|
2018-03-24 04:21:49 +01:00
|
|
|
log.exception('Unable to connect to network %r', self.name)
|
|
|
|
self._start_reconnect()
|
|
|
|
return
|
2018-04-08 06:22:18 +02:00
|
|
|
|
|
|
|
if self not in world.networkobjects.values():
|
|
|
|
log.debug("(%s) _connect: disconnecting socket %s as the network was removed",
|
|
|
|
self.name, self._socket)
|
|
|
|
try:
|
|
|
|
self._socket.shutdown(socket.SHUT_RDWR)
|
|
|
|
finally:
|
|
|
|
self._socket.close()
|
|
|
|
return
|
|
|
|
|
2018-03-17 19:01:32 +01:00
|
|
|
self._socket.settimeout(self.pingtimeout)
|
2018-04-12 19:51:34 +02:00
|
|
|
|
|
|
|
# Make sure future reads never block, since select doesn't always guarantee this.
|
|
|
|
self._socket.setblocking(False)
|
|
|
|
|
2018-03-24 04:16:29 +01:00
|
|
|
self._selector_key = selectdriver.register(self)
|
2018-03-17 19:01:32 +01:00
|
|
|
|
|
|
|
# If SSL was enabled, optionally verify the certificate
|
|
|
|
# fingerprint for some added security. I don't bother to check
|
|
|
|
# the entire certificate for validity, since most IRC networks
|
|
|
|
# self-sign their certificates anyways.
|
|
|
|
if self.ssl and checks_ok:
|
|
|
|
peercert = self._socket.getpeercert(binary_form=True)
|
|
|
|
|
|
|
|
# Hash type is configurable using the ssl_fingerprint_type
|
|
|
|
# value, and defaults to sha256.
|
|
|
|
hashtype = self.serverdata.get('ssl_fingerprint_type', 'sha256').lower()
|
2016-05-01 01:44:37 +02:00
|
|
|
|
2018-03-17 19:01:32 +01:00
|
|
|
try:
|
|
|
|
hashfunc = getattr(hashlib, hashtype)
|
|
|
|
except AttributeError:
|
|
|
|
log.error('(%s) Unsupported SSL certificate fingerprint type %r given, disconnecting...',
|
|
|
|
self.name, hashtype)
|
|
|
|
checks_ok = False
|
|
|
|
else:
|
|
|
|
fp = hashfunc(peercert).hexdigest()
|
|
|
|
expected_fp = self.serverdata.get('ssl_fingerprint')
|
|
|
|
|
|
|
|
if expected_fp and checks_ok:
|
|
|
|
if fp != expected_fp:
|
|
|
|
# SSL Fingerprint doesn't match; break.
|
|
|
|
log.error('(%s) Uplink\'s SSL certificate '
|
|
|
|
'fingerprint (%s) does not match the '
|
|
|
|
'one configured: expected %r, got %r; '
|
|
|
|
'disconnecting...', self.name, hashtype,
|
|
|
|
expected_fp, fp)
|
|
|
|
checks_ok = False
|
|
|
|
else:
|
|
|
|
log.info('(%s) Uplink SSL certificate fingerprint '
|
|
|
|
'(%s) verified: %r', self.name, hashtype,
|
|
|
|
fp)
|
|
|
|
else:
|
|
|
|
log.info('(%s) Uplink\'s SSL certificate fingerprint (%s) '
|
|
|
|
'is %r. You can enhance the security of your '
|
|
|
|
'link by specifying this in a "ssl_fingerprint"'
|
|
|
|
' option in your server block.', self.name,
|
|
|
|
hashtype, fp)
|
|
|
|
|
|
|
|
if checks_ok:
|
|
|
|
|
|
|
|
self._queue_thread = threading.Thread(name="Queue thread for %s" % self.name,
|
|
|
|
target=self._process_queue, daemon=True)
|
|
|
|
self._queue_thread.start()
|
|
|
|
|
|
|
|
self.sid = self.serverdata.get("sid")
|
|
|
|
# All our checks passed, get the protocol module to connect and run the listen
|
|
|
|
# loop. This also updates any SID values should the protocol module do so.
|
|
|
|
self.post_connect()
|
|
|
|
|
|
|
|
log.info('(%s) Enumerating our own SID %s', self.name, self.sid)
|
|
|
|
host = self.hostname()
|
|
|
|
|
|
|
|
self.servers[self.sid] = Server(self, None, host, internal=True,
|
|
|
|
desc=self.serverdata.get('serverdesc')
|
|
|
|
or conf.conf['pylink']['serverdesc'])
|
|
|
|
|
|
|
|
log.info('(%s) Starting ping schedulers....', self.name)
|
|
|
|
self._schedule_ping()
|
|
|
|
log.info('(%s) Server ready; listening for data.', self.name)
|
|
|
|
self.autoconnect_active_multiplier = 1 # Reset any extra autoconnect delays
|
|
|
|
else: # Configuration error :(
|
|
|
|
log.error('(%s) A configuration error was encountered '
|
|
|
|
'trying to set up this connection. Please check'
|
|
|
|
' your configuration file and try again.',
|
|
|
|
self.name)
|
|
|
|
# _run_irc() or the protocol module it called raised an exception, meaning we've disconnected!
|
|
|
|
# Note: socket.error, ConnectionError, IOError, etc. are included in OSError since Python 3.3,
|
|
|
|
# so we don't need to explicitly catch them here.
|
|
|
|
# We also catch SystemExit here as a way to abort out connection threads properly, and stop the
|
|
|
|
# IRC connection from freezing instead.
|
|
|
|
except (OSError, RuntimeError, SystemExit) as e:
|
|
|
|
self._log_connection_error('(%s) Disconnected from IRC:', self.name, exc_info=True)
|
2017-12-04 02:46:24 +01:00
|
|
|
if not self._aborted.is_set():
|
|
|
|
self.disconnect()
|
|
|
|
|
2018-04-08 06:22:18 +02:00
|
|
|
def connect(self):
|
|
|
|
"""
|
|
|
|
Starts a thread to connect the network.
|
|
|
|
"""
|
|
|
|
connect_thread = threading.Thread(target=self._connect, daemon=True,
|
|
|
|
name="Connect thread for %s" %
|
|
|
|
self.name)
|
|
|
|
connect_thread.start()
|
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def disconnect(self):
|
|
|
|
"""Handle disconnects from the remote server."""
|
2018-03-31 20:53:01 +02:00
|
|
|
if self._aborted.is_set():
|
|
|
|
return
|
|
|
|
|
2017-06-17 01:49:45 +02:00
|
|
|
self._pre_disconnect()
|
2017-02-18 22:32:48 +01:00
|
|
|
|
2018-04-08 07:12:17 +02:00
|
|
|
# Stop the queue thread.
|
|
|
|
if self._queue is not None:
|
|
|
|
try:
|
|
|
|
# XXX: queue.Queue.queue isn't actually documented, so this is probably not reliable in the long run.
|
|
|
|
with self._queue.mutex:
|
|
|
|
self._queue.queue[0] = None
|
|
|
|
except IndexError:
|
|
|
|
self._queue.put(None)
|
|
|
|
|
2017-07-13 07:56:30 +02:00
|
|
|
if self._socket is not None:
|
2018-03-29 23:04:58 +02:00
|
|
|
try:
|
|
|
|
selectdriver.unregister(self)
|
|
|
|
except KeyError:
|
|
|
|
pass
|
2017-07-13 07:50:16 +02:00
|
|
|
try:
|
2018-04-08 06:46:05 +02:00
|
|
|
log.debug('(%s) disconnect: shutting down read half of socket %s', self.name, self._socket)
|
|
|
|
self._socket.shutdown(socket.SHUT_RD)
|
|
|
|
except:
|
|
|
|
log.debug('(%s) Error on socket shutdown:', self.name, exc_info=True)
|
2016-05-01 01:54:11 +02:00
|
|
|
|
2018-04-08 07:12:17 +02:00
|
|
|
log.debug('(%s) disconnect: waiting for write half of socket %s to shutdown', self.name, self._socket)
|
2018-04-08 06:46:05 +02:00
|
|
|
# Wait for the write half to shut down when applicable.
|
|
|
|
if self._queue_thread is None or self._aborted_send.wait(10):
|
|
|
|
log.debug('(%s) disconnect: closing socket %s', self.name, self._socket)
|
|
|
|
self._socket.close()
|
2016-05-01 01:54:11 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
# Stop the ping timer.
|
2017-07-13 07:56:30 +02:00
|
|
|
if self._ping_timer:
|
2017-06-16 06:31:03 +02:00
|
|
|
log.debug('(%s) Canceling pingTimer at %s due to disconnect() call', self.name, time.time())
|
2017-07-13 07:56:30 +02:00
|
|
|
self._ping_timer.cancel()
|
2018-03-17 20:18:16 +01:00
|
|
|
self._buffer = b''
|
2017-06-17 01:49:45 +02:00
|
|
|
self._post_disconnect()
|
2018-03-29 23:04:58 +02:00
|
|
|
|
|
|
|
# Clear old sockets.
|
|
|
|
self._socket = None
|
|
|
|
|
2018-03-24 04:21:49 +01:00
|
|
|
self._start_reconnect()
|
2017-05-05 04:04:03 +02:00
|
|
|
|
2018-03-24 04:21:49 +01:00
|
|
|
def _start_reconnect(self):
|
|
|
|
"""Schedules a reconnection to the network."""
|
2018-03-17 23:26:36 +01:00
|
|
|
def _reconnect():
|
|
|
|
# _run_autoconnect() will block and return True after the autoconnect
|
2018-03-24 04:21:49 +01:00
|
|
|
# delay has passed, if autoconnect is disabled. We do not want it to
|
|
|
|
# block whatever is calling disconnect() though, so we run it in a new
|
|
|
|
# thread.
|
2018-03-17 23:26:36 +01:00
|
|
|
if self._run_autoconnect():
|
|
|
|
self.connect()
|
2018-03-23 01:42:28 +01:00
|
|
|
|
2018-04-08 06:22:18 +02:00
|
|
|
if self not in world.networkobjects.values():
|
|
|
|
log.debug('(%s) _start_reconnect: Stopping reconnect timer as the network was removed', self.name)
|
|
|
|
return
|
|
|
|
elif self._reconnect_thread is None or not self._reconnect_thread.is_alive():
|
2018-03-23 01:42:28 +01:00
|
|
|
self._reconnect_thread = threading.Thread(target=_reconnect, name="Reconnecting network %s" % self.name)
|
|
|
|
self._reconnect_thread.start()
|
2018-03-24 04:21:49 +01:00
|
|
|
else:
|
|
|
|
log.debug('(%s) Ignoring attempt to reschedule reconnect as one is in progress.', self.name)
|
2018-03-17 19:01:32 +01:00
|
|
|
|
2017-07-31 05:09:08 +02:00
|
|
|
def handle_events(self, line):
|
|
|
|
raise NotImplementedError
|
|
|
|
|
|
|
|
def parse_irc_command(self, line):
|
|
|
|
"""Sends a command to the protocol module."""
|
|
|
|
log.debug("(%s) <- %s", self.name, line)
|
|
|
|
try:
|
|
|
|
hook_args = self.handle_events(line)
|
|
|
|
except Exception:
|
|
|
|
log.exception('(%s) Caught error in handle_events, disconnecting!', self.name)
|
|
|
|
log.error('(%s) The offending line was: <- %s', self.name, line)
|
2017-08-06 07:14:44 +02:00
|
|
|
self.disconnect()
|
2017-07-31 05:09:08 +02:00
|
|
|
return
|
|
|
|
# Only call our hooks if there's data to process. Handlers that support
|
|
|
|
# hooks will return a dict of parsed arguments, which can be passed on
|
|
|
|
# to plugins and the like. For example, the JOIN handler will return
|
|
|
|
# something like: {'channel': '#whatever', 'users': ['UID1', 'UID2',
|
|
|
|
# 'UID3']}, etc.
|
|
|
|
if hook_args is not None:
|
|
|
|
self.call_hooks(hook_args)
|
|
|
|
|
|
|
|
return hook_args
|
|
|
|
|
2017-06-27 10:44:26 +02:00
|
|
|
def _run_irc(self):
|
2018-03-17 19:01:32 +01:00
|
|
|
"""
|
|
|
|
Message handler, called when select() has data to read.
|
|
|
|
"""
|
2018-05-19 04:08:37 +02:00
|
|
|
if self._socket is None:
|
|
|
|
log.debug('(%s) Ignoring attempt to read data because self._socket is None', self.name)
|
|
|
|
return
|
|
|
|
|
2018-03-17 19:01:32 +01:00
|
|
|
data = b''
|
|
|
|
try:
|
|
|
|
data = self._socket.recv(2048)
|
2018-03-17 19:03:58 +01:00
|
|
|
except (BlockingIOError, ssl.SSLWantReadError, ssl.SSLWantWriteError):
|
2018-04-12 19:51:34 +02:00
|
|
|
log.debug('(%s) No data to read, trying again later...', self.name, exc_info=True)
|
|
|
|
return
|
2018-03-17 19:01:32 +01:00
|
|
|
except OSError:
|
|
|
|
# Suppress socket read warnings from lingering recv() calls if
|
|
|
|
# we've been told to shutdown.
|
|
|
|
if self._aborted.is_set():
|
2017-06-16 06:31:03 +02:00
|
|
|
return
|
2018-03-17 19:01:32 +01:00
|
|
|
raise
|
|
|
|
|
2018-03-17 20:18:16 +01:00
|
|
|
self._buffer += data
|
2018-03-17 19:01:32 +01:00
|
|
|
if not data:
|
|
|
|
self._log_connection_error('(%s) Connection lost, disconnecting.', self.name)
|
|
|
|
self.disconnect()
|
|
|
|
return
|
|
|
|
elif (time.time() - self.lastping) > self.pingtimeout:
|
|
|
|
self._log_connection_error('(%s) Connection timed out.', self.name)
|
|
|
|
self.disconnect()
|
|
|
|
return
|
2016-07-07 09:25:50 +02:00
|
|
|
|
2018-03-17 20:18:16 +01:00
|
|
|
while b'\n' in self._buffer:
|
|
|
|
line, self._buffer = self._buffer.split(b'\n', 1)
|
2018-03-17 19:01:32 +01:00
|
|
|
line = line.strip(b'\r')
|
|
|
|
line = line.decode(self.encoding, "replace")
|
|
|
|
self.parse_irc_command(line)
|
2016-07-07 08:11:36 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def _send(self, data):
|
|
|
|
"""Sends raw text to the uplink server."""
|
2018-03-30 19:47:34 +02:00
|
|
|
if self._aborted.is_set():
|
|
|
|
log.debug("(%s) Not sending message %r since the connection is dead", self.name, data)
|
|
|
|
return
|
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
# Safeguard against newlines in input!! Otherwise, each line gets
|
|
|
|
# treated as a separate command, which is particularly nasty.
|
|
|
|
data = data.replace('\n', ' ')
|
2017-07-17 03:46:56 +02:00
|
|
|
encoded_data = data.encode(self.encoding, 'replace')
|
|
|
|
if self.S2S_BUFSIZE > 0: # Apply message cutoff as needed
|
|
|
|
encoded_data = encoded_data[:self.S2S_BUFSIZE]
|
|
|
|
encoded_data += b"\r\n"
|
2017-05-05 04:04:03 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
log.debug("(%s) -> %s", self.name, data)
|
2017-05-05 04:04:03 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
try:
|
2017-07-13 07:56:30 +02:00
|
|
|
self._socket.send(encoded_data)
|
2018-03-08 03:30:14 +01:00
|
|
|
except:
|
|
|
|
log.exception("(%s) Failed to send message %r; aborting!", self.name, data)
|
|
|
|
self.disconnect()
|
2016-07-07 08:11:36 +02:00
|
|
|
|
2017-06-16 06:31:03 +02:00
|
|
|
def send(self, data, queue=True):
|
|
|
|
"""send() wrapper with optional queueing support."""
|
2017-08-06 07:16:39 +02:00
|
|
|
if self._aborted.is_set():
|
|
|
|
log.debug('(%s) refusing to queue data %r as self._aborted is set', self.name, data)
|
2017-06-16 06:31:03 +02:00
|
|
|
return
|
|
|
|
if queue:
|
|
|
|
# XXX: we don't really know how to handle blocking queues yet, so
|
|
|
|
# it's better to not expose that yet.
|
2017-07-13 07:56:30 +02:00
|
|
|
self._queue.put_nowait(data)
|
2017-06-16 06:31:03 +02:00
|
|
|
else:
|
|
|
|
self._send(data)
|
2017-05-05 04:04:03 +02:00
|
|
|
|
2017-06-27 11:53:09 +02:00
|
|
|
def _process_queue(self):
|
2017-06-16 06:31:03 +02:00
|
|
|
"""Loop to process outgoing queue data."""
|
|
|
|
while True:
|
|
|
|
throttle_time = self.serverdata.get('throttle_time', 0.005)
|
2017-08-06 07:16:39 +02:00
|
|
|
if not self._aborted.wait(throttle_time):
|
2017-07-13 07:56:30 +02:00
|
|
|
data = self._queue.get()
|
2017-06-16 06:31:03 +02:00
|
|
|
if data is None:
|
|
|
|
log.debug('(%s) Stopping queue thread due to getting None as item', self.name)
|
|
|
|
break
|
2017-08-06 07:11:22 +02:00
|
|
|
elif self not in world.networkobjects.values():
|
|
|
|
log.debug('(%s) Stopping stale queue thread; no longer matches world.networkobjects', self.name)
|
|
|
|
break
|
2018-03-30 19:47:34 +02:00
|
|
|
elif self._aborted.is_set():
|
|
|
|
# The _aborted flag may have changed while we were waiting for an item,
|
|
|
|
# so check for it again.
|
|
|
|
log.debug('(%s) Stopping queue thread since the connection is dead', self.name)
|
2018-04-08 06:46:05 +02:00
|
|
|
break
|
2017-06-16 06:31:03 +02:00
|
|
|
elif data:
|
|
|
|
self._send(data)
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
|
2018-04-08 06:46:05 +02:00
|
|
|
# Once we're done here, shut down the write part of the socket.
|
|
|
|
if self._socket:
|
|
|
|
log.debug('(%s) _process_queue: shutting down write half of socket %s', self.name, self._socket)
|
|
|
|
self._socket.shutdown(socket.SHUT_WR)
|
|
|
|
self._aborted_send.set()
|
|
|
|
|
2018-05-11 23:38:21 +02:00
|
|
|
def wrap_message(self, source, target, text):
|
|
|
|
"""
|
|
|
|
Wraps the given message text into multiple lines, and returns these as a list.
|
|
|
|
|
|
|
|
For IRC, the maximum length of one message is calculated as S2S_BUFSIZE (default to 510)
|
|
|
|
minus the length of ":sender-nick!sender-user@sender-host PRIVMSG #target :"
|
|
|
|
"""
|
2018-05-19 04:09:06 +02:00
|
|
|
# We explicitly want wrapping (e.g. for messages eventually making its way to a user), so
|
|
|
|
# use the default bufsize of 510 even if the IRCd's S2S protocol allows infinitely long
|
|
|
|
# long messages.
|
|
|
|
bufsize = self.S2S_BUFSIZE or IRCNetwork.S2S_BUFSIZE
|
2018-05-11 23:46:47 +02:00
|
|
|
try:
|
|
|
|
target = self.get_friendly_name(target)
|
|
|
|
except KeyError:
|
|
|
|
log.warning('(%s) Possible desync? Error while expanding wrap_message target %r '
|
|
|
|
'(source=%s)', self.name, target, source, exc_info=True)
|
|
|
|
|
2018-05-11 23:38:21 +02:00
|
|
|
prefixstr = ":%s PRIVMSG %s :" % (self.get_hostmask(source), target)
|
2018-05-19 04:09:06 +02:00
|
|
|
maxlen = bufsize - len(prefixstr)
|
2018-05-11 23:38:21 +02:00
|
|
|
|
2018-05-19 04:09:06 +02:00
|
|
|
log.debug('(%s) wrap_message: length of prefix %r is %s, bufsize=%s, maxlen=%s',
|
|
|
|
self.name, prefixstr, len(prefixstr), bufsize, maxlen)
|
|
|
|
|
|
|
|
if maxlen <= 0:
|
|
|
|
log.error('(%s) Got invalid maxlen %s for wrap_message (%s -> %s)', self.name, maxlen,
|
|
|
|
source, target)
|
|
|
|
return [text]
|
2018-05-11 23:38:21 +02:00
|
|
|
|
|
|
|
return textwrap.wrap(text, width=maxlen)
|
|
|
|
|
2017-06-16 06:55:08 +02:00
|
|
|
Irc = IRCNetwork
|
2016-07-07 08:11:36 +02:00
|
|
|
|
2017-06-16 06:54:40 +02:00
|
|
|
class Server():
    """PyLink IRC server class.

    irc: the protocol/network object this Server instance is attached to.
    uplink: The SID of this Server instance's uplink. This is set to None
            for **both** the main PyLink server and our uplink.
    name: The name of the server (stored lowercased).
    internal: Boolean, whether the server is an internal PyLink server.
    desc: Sets the server description if relevant.
    """

    def __init__(self, irc, uplink, name, internal=False, desc="(None given)"):
        self._irc = irc
        self.uplink = uplink
        self.name = name.lower()
        self.internal = internal
        self.desc = desc

        # Set of UIDs of users on this server.
        self.users = set()

        assert uplink is None or uplink in self._irc.servers, "Unknown uplink %s" % uplink

        # Top-level servers (no uplink) sit at hop 1; everything else is one
        # hop further than its uplink.
        self.hopcount = 1 if uplink is None else self._irc.servers[uplink].hopcount + 1

        # Has the server finished bursting yet?
        self.has_eob = False

    def __repr__(self):
        return 'Server(%s)' % self.name

# Backwards-compatible alias (PyLink 1.x name).
IrcServer = Server
|
2015-06-07 08:04:11 +02:00
|
|
|
|
2018-03-17 23:49:48 +01:00
|
|
|
class Channel(structures.CamelCaseToSnakeCase, structures.CopyWrapper):
    """PyLink IRC channel class."""

    def __init__(self, irc, name=None):
        # Initialize variables, such as the topic, user list, TS, who's opped, etc.

        # Set of UIDs of users currently in the channel.
        self.users = set()
        # Modes currently set on the channel.
        self.modes = set()
        self.topic = ''
        # Channel timestamp; defaults to the current time until updated.
        self.ts = int(time.time())
        # Maps each named prefix rank to the set of UIDs holding it.
        self.prefixmodes = {'op': set(), 'halfop': set(), 'voice': set(),
                            'owner': set(), 'admin': set()}
        self._irc = irc

        # Determines whether a topic has been set here or not. Protocol modules
        # should set this.
        self.topicset = False

        # Saves the channel name (may be useful to plugins, etc.)
        self.name = name

    def __repr__(self):
        return 'Channel(%s)' % self.name

    def remove_user(self, target):
        """Removes a user from a channel."""
        # Drop the user from every prefix-mode set as well as the main user list.
        for s in self.prefixmodes.values():
            s.discard(target)
        self.users.discard(target)
    removeuser = remove_user

    def is_voice(self, uid):
        """Returns whether the given user is voice in the channel."""
        return uid in self.prefixmodes['voice']

    def is_halfop(self, uid):
        """Returns whether the given user is halfop in the channel."""
        return uid in self.prefixmodes['halfop']

    def is_op(self, uid):
        """Returns whether the given user is op in the channel."""
        return uid in self.prefixmodes['op']

    def is_admin(self, uid):
        """Returns whether the given user is admin (&) in the channel."""
        return uid in self.prefixmodes['admin']

    def is_owner(self, uid):
        """Returns whether the given user is owner (~) in the channel."""
        return uid in self.prefixmodes['owner']

    def is_voice_plus(self, uid):
        """Returns whether the given user is voice or above in the channel."""
        # If the user has any prefix mode, it has to be voice or greater.
        # Call get_prefix_modes directly (snake_case) for consistency with the
        # other is_*_plus checks, instead of going through the
        # CamelCaseToSnakeCase compatibility shim via getPrefixModes.
        return bool(self.get_prefix_modes(uid))

    def is_halfop_plus(self, uid):
        """Returns whether the given user is halfop or above in the channel."""
        for mode in ('halfop', 'op', 'admin', 'owner'):
            if uid in self.prefixmodes[mode]:
                return True
        return False

    def is_op_plus(self, uid):
        """Returns whether the given user is op or above in the channel."""
        for mode in ('op', 'admin', 'owner'):
            if uid in self.prefixmodes[mode]:
                return True
        return False

    @staticmethod
    def sort_prefixes(key):
        """
        Returns a numeric value for a named prefix mode: higher ranks have lower values
        (sorted first), and lower ranks have higher values (sorted last).

        This function essentially implements a sorted() key function for named prefix modes.
        """
        values = {'owner': 0, 'admin': 100, 'op': 200, 'halfop': 300, 'voice': 500}

        # Default to highest value (1000) for unknown modes, should they appear.
        return values.get(key, 1000)

    def get_prefix_modes(self, uid, prefixmodes=None):
        """
        Returns a list of all named prefix modes the user has in the channel, in
        decreasing order from owner to voice.

        Optionally, a prefixmodes argument can be given to look at an earlier state of
        the channel's prefix modes mapping, e.g. for checking the op status of a mode
        setter before their modes are processed and added to the channel state.

        Raises KeyError if the given user is not in the channel.
        """
        if uid not in self.users:
            raise KeyError("User %s does not exist or is not in the channel" % uid)

        result = []
        prefixmodes = prefixmodes or self.prefixmodes

        for mode, modelist in prefixmodes.items():
            if uid in modelist:
                result.append(mode)

        return sorted(result, key=self.sort_prefixes)
|
2017-06-16 06:54:40 +02:00
|
|
|
IrcChannel = Channel  # Alias kept for backwards compatibility with PyLink 1.x naming.
|
2017-08-29 04:42:10 +02:00
|
|
|
|
|
|
|
class PUIDGenerator():
    """
    Pseudo UID Generator module, using a prefix and a simple counter.
    """

    def __init__(self, prefix, start=0):
        # The default prefix prepended to generated IDs, and the counter
        # providing each ID's numeric suffix.
        self.prefix = prefix
        self.counter = start

    def next_uid(self, prefix=''):
        """
        Generates the next PUID.
        """
        # An explicit prefix argument overrides the instance default.
        puid = '{}@{}'.format(prefix or self.prefix, self.counter)
        self.counter += 1
        return puid
    next_sid = next_uid
|