2015-12-07 02:40:13 +01:00
"""
classes . py - Base classes for PyLink IRC Services .
This module contains the base classes used by PyLink , including threaded IRC
connections and objects used to represent IRC servers , users , and channels .
Here be dragons .
"""
2019-07-15 00:12:29 +02:00
import collections
import collections . abc
import functools
2015-08-26 05:37:15 +02:00
import hashlib
2017-02-25 07:27:11 +01:00
import ipaddress
2017-04-01 02:41:56 +02:00
import queue
2017-08-07 05:02:09 +02:00
import re
2019-07-15 00:12:29 +02:00
import socket
import ssl
import string
2018-05-11 23:38:21 +02:00
import textwrap
2019-07-15 00:12:29 +02:00
import threading
import time
2015-07-08 03:07:20 +02:00
2019-07-15 00:12:29 +02:00
from . import __version__ , conf , selectdriver , structures , utils , world
2020-06-19 00:47:20 +02:00
from . log import log , PyLinkChannelLogger
2017-08-22 07:20:20 +02:00
from . utils import ProtocolError # Compatibility with PyLink 1.x
2015-08-26 05:37:15 +02:00
2020-06-19 00:47:20 +02:00
# Public API of this module.
__all__ = ['ChannelState', 'User', 'UserMapping', 'PyLinkNetworkCore',
           'PyLinkNetworkCoreWithUtils', 'IRCNetwork', 'Server', 'Channel',
           'PUIDGenerator', 'ProtocolError']

# Alias for the exception raised when an outgoing message queue is full.
QUEUE_FULL = queue.Full

### Internal classes (users, servers, channels)
2017-08-22 06:31:50 +02:00
class ChannelState(structures.IRCCaseInsensitiveDict):
    """
    A dictionary storing channels case insensitively. Channel objects are initialized on access.
    """
    def __getitem__(self, key):
        # Normalize the key per the network's casemapping before any lookup.
        key = self._keymangle(key)
        try:
            return self._data[key]
        except KeyError:
            # Implicitly create the channel on first access; protocol modules
            # rely on this behaviour.
            log.debug('(%s) ChannelState: creating new channel %s in memory', self._irc.name, key)
            chanobj = self._data[key] = Channel(self._irc, key)
            return chanobj
2018-05-26 10:00:04 +02:00
class TSObject():
    """Base class for classes containing a type-normalized timestamp."""
    def __init__(self, *args, **kwargs):
        # Default the timestamp to the current time, truncated to whole seconds.
        self._ts = int(time.time())

    @property
    def ts(self):
        return self._ts

    @ts.setter
    def ts(self, value):
        # Coerce anything that is neither int nor float, warning loudly with a
        # stack trace so the offending caller can be tracked down.
        if not isinstance(value, (int, float)):
            log.warning('TSObject: Got bad type for TS, converting from %s to int',
                        type(value), stack_info=True)
            value = int(value)
        self._ts = value
class User(TSObject):
    """PyLink IRC user class."""
    def __init__(self, irc, nick, ts, uid, server, ident='null', host='null',
                 realname='PyLink dummy client', realhost='null',
                 ip='0.0.0.0', manipulatable=False, opertype='IRC Operator'):
        super().__init__()
        # The nick is tracked both as-is and lowercased (per the network's
        # casemapping) so the bynick index stays consistent.
        self._nick = nick
        self.lower_nick = irc.to_lower(nick)

        self.ts = ts
        self.uid = uid
        self.ident = ident
        self.host = host
        self.realhost = realhost
        self.ip = ip
        self.realname = realname
        self.modes = set()  # Tracks user modes

        self.server = server
        self._irc = irc

        # Tracks PyLink identification status
        self.account = ''

        # Tracks oper type (for display only)
        self.opertype = opertype

        # Tracks external services identification status
        self.services_account = ''

        # Tracks channels the user is in
        self.channels = structures.IRCCaseInsensitiveSet(self._irc)

        # Tracks away message status
        self.away = ''

        # This sets whether the client should be marked as manipulatable.
        # Plugins like bots.py's commands should take caution against
        # manipulating these "protected" clients, to prevent desyncs and such.
        # For "serious" service clients, this should always be False.
        self.manipulatable = manipulatable

        # Cloaked host for IRCds that use it
        self.cloaked_host = None

        # Stores service bot name if applicable
        self.service = None

        # Whether the user is using SSL/TLS (None = unknown)
        self.ssl = None

    @property
    def nick(self):
        return self._nick

    @nick.setter
    def nick(self, newnick):
        # Note: the old lowercased nick must be captured before reassigning,
        # since the bynick index is keyed on it.
        oldnick = self.lower_nick
        self._nick = newnick
        self.lower_nick = self._irc.to_lower(newnick)

        # Update the irc.users bynick index:
        if oldnick in self._irc.users.bynick:
            # Remove existing value -> key mappings.
            self._irc.users.bynick[oldnick].remove(self.uid)

            # Remove now-empty keys as well.
            if not self._irc.users.bynick[oldnick]:
                del self._irc.users.bynick[oldnick]

        # Update the new nick.
        self._irc.users.bynick.setdefault(self.lower_nick, []).append(self.uid)

    def get_fields(self):
        """
        Returns all template/substitution-friendly fields for the User object in a read-only dictionary.
        """
        fields = self.__dict__.copy()

        # These don't really make sense in text substitutions
        for field in ('manipulatable', '_irc', 'channels', 'modes'):
            del fields[field]

        # Swap SID and server name for convenience
        fields['sid'] = self.server
        try:
            fields['server'] = self._irc.get_friendly_name(self.server)
        except KeyError:
            pass  # Keep it as is (i.e. as the SID) if grabbing the server name fails

        # Network name
        fields['netname'] = self._irc.name

        # Add the nick attribute; this isn't in __dict__ because it's a property
        fields['nick'] = self._nick

        return fields

    def __repr__(self):
        return 'User(%s/%s)' % (self.uid, self.nick)

IrcUser = User  # Compatibility alias for PyLink 1.x code
# Bidirectional dict based off https://stackoverflow.com/a/21894086
class UserMapping(collections.abc.MutableMapping, structures.CopyWrapper):
    """
    A mapping storing User objects by UID, as well as UIDs by nick via
    the 'bynick' attribute
    """
    def __init__(self, irc, data=None):
        if data is not None:
            assert isinstance(data, dict)
            self._data = data
        else:
            self._data = {}
        # Index of lowercased nick -> list of UIDs sharing that nick.
        self.bynick = collections.defaultdict(list)
        self._irc = irc

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, userobj):
        assert hasattr(userobj, 'lower_nick'), "Cannot add object without lower_nick attribute to UserMapping"
        if key in self._data:
            # NOTE(review): replacing an existing UID leaves the old object's
            # bynick entry behind — presumably callers never do this; verify.
            log.warning('(%s) Attempting to replace User object for %r: %r -> %r', self._irc.name,
                        key, self._data.get(key), userobj)

        self._data[key] = userobj
        self.bynick.setdefault(userobj.lower_nick, []).append(key)

    def __delitem__(self, key):
        # Remove this entry from the bynick index
        if self[key].lower_nick in self.bynick:
            self.bynick[self[key].lower_nick].remove(key)

            # Remove now-empty keys as well.
            if not self.bynick[self[key].lower_nick]:
                del self.bynick[self[key].lower_nick]

        del self._data[key]

    # Generic container methods. XXX: consider abstracting this out in structures?
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self._data)

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def __contains__(self, key):
        return self._data.__contains__(key)

    def __copy__(self):
        return self.__class__(self._irc, data=self._data.copy())
2018-03-24 08:10:00 +01:00
2018-03-17 23:49:48 +01:00
class PyLinkNetworkCore(structures.CamelCaseToSnakeCase):
    """Base IRC object for PyLink."""

    def __init__(self, netname):
        # Channel logging handlers registered for this network (see log_setup()).
        self.loghandlers = []
        self.name = netname
        self.conf = conf.conf
        if not hasattr(self, 'sid'):
            self.sid = None
        # serverdata may be overridden as a property on some protocols
        if netname in conf.conf['servers'] and not hasattr(self, 'serverdata'):
            self.serverdata = conf.conf['servers'][netname]

        self.protoname = self.__class__.__module__.split('.')[-1]  # Remove leading pylinkirc.protocols.

        # Protocol stuff
        self.casemapping = 'rfc1459'
        self.hook_map = {}

        # Lists required conf keys for the server block.
        self.conf_keys = {'ip', 'port', 'hostname', 'sid', 'sidrange', 'protocol', 'sendpass',
                          'recvpass'}

        # Defines a set of PyLink protocol capabilities
        self.protocol_caps = set()

        # These options depend on self.serverdata from above to be set.
        self.encoding = None

        # Connection state: 'connected' is set once a link is fully up;
        # '_aborted' / '_aborted_send' signal the I/O threads to stop.
        self.connected = threading.Event()
        self._aborted = threading.Event()
        self._aborted_send = threading.Event()
        # Serializes reply() so output redirection can be monkey-patched safely.
        self._reply_lock = threading.RLock()

        # Sets the multiplier for autoconnect delay (grows with time).
        self.autoconnect_active_multiplier = 1

        # Whether the last connection attempt reached the connected state.
        self.was_successful = False

        self._init_vars()
2016-03-25 22:54:29 +01:00
2017-06-03 08:17:14 +02:00
    def log_setup(self):
        """
        Initializes any channel loggers defined for the current network.
        """
        try:
            channels = conf.conf['logging']['channels'][self.name]
        except (KeyError, TypeError):  # Not set up; just ignore.
            return

        log.debug('(%s) Setting up channel logging to channels %r', self.name,
                  channels)

        # Only create handlers if they haven't already been set up.
        if not self.loghandlers:
            if not isinstance(channels, dict):
                log.warning('(%s) Got invalid channel logging configuration %r; are your indentation '
                            'and block commenting consistent?', self.name, channels)
                return

            for channel, chandata in channels.items():
                # Fetch the log level for this channel block.
                level = None
                if isinstance(chandata, dict):
                    level = chandata.get('loglevel')
                else:
                    log.warning('(%s) Got invalid channel logging pair %r: %r; are your indentation '
                                'and block commenting consistent?', self.name, channel, chandata)

                # NOTE: a handler is still created even for an invalid pair above,
                # with level=None (i.e. the logger's default level).
                handler = PyLinkChannelLogger(self, channel, level=level)
                self.loghandlers.append(handler)
                log.addHandler(handler)
2016-01-23 22:13:38 +01:00
2017-07-14 14:22:05 +02:00
    def _init_vars(self):
        """
        (Re)sets an IRC object to its default state. This should be called when
        an IRC object is first created, and on every reconnection to a network.
        """
        self.encoding = self.serverdata.get('encoding') or 'utf-8'

        # Tracks the main PyLink client's UID.
        self.pseudoclient = None

        # Internal variable to set the place and caller of the last command (in PM
        # or in a channel), used by fantasy command support.
        self.called_by = None
        self.called_in = None

        # Intialize the server, channel, and user indexes to be populated by
        # our protocol module.
        self.servers = {}
        self.users = UserMapping(self)

        # Two versions of the channels index exist in PyLink 2.0, and they are joined together
        # - irc._channels which implicitly creates channels on access (mostly used
        #   in protocol modules)
        # - irc.channels which does not (recommended for use by plugins)
        # Both share the same underlying dict, so they always stay in sync.
        self._channels = ChannelState(self)
        self.channels = structures.IRCCaseInsensitiveDict(self, data=self._channels._data)

        # This sets the list of supported channel and user modes: the default
        # RFC1459 modes are implied. Named modes are used here to make
        # protocol-independent code easier to write, as mode chars vary by
        # IRCd.
        # Protocol modules should add to and/or replace this with what their
        # protocol supports. This can be a hardcoded list or something
        # negotiated on connect, depending on the nature of their protocol.
        self.cmodes = {'op': 'o', 'secret': 's', 'private': 'p',
                       'noextmsg': 'n', 'moderated': 'm', 'inviteonly': 'i',
                       'topiclock': 't', 'limit': 'l', 'ban': 'b',
                       'voice': 'v', 'key': 'k',
                       # This fills in the type of mode each mode character is.
                       # A-type modes are list modes (i.e. bans, ban exceptions, etc.),
                       # B-type modes require an argument to both set and unset,
                       #   but there can only be one value at a time
                       #   (i.e. cmode +k).
                       # C-type modes require an argument to set but not to unset
                       #   (one sets "+l limit" and "-l"),
                       # and D-type modes take no arguments at all.
                       '*A': 'b',
                       '*B': 'k',
                       '*C': 'l',
                       '*D': 'imnpst'}
        self.umodes = {'invisible': 'i', 'snomask': 's', 'wallops': 'w',
                       'oper': 'o',
                       '*A': '', '*B': '', '*C': '', '*D': 'iosw'}

        # Acting extbans such as +b m:n!u@h on InspIRCd
        self.extbans_acting = {}
        # Matching extbans such as R:account on InspIRCd and $a:account on TS6.
        self.extbans_matching = {}

        # This max nick length starts off as the config value, but may be
        # overwritten later by the protocol module if such information is
        # received. It defaults to 30.
        self.maxnicklen = self.serverdata.get('maxnicklen', 30)

        # Defines a list of supported prefix modes.
        self.prefixmodes = {'o': '@', 'v': '+'}

        # Defines the uplink SID (to be filled in by protocol module).
        self.uplink = None
        self.start_ts = int(time.time())

        # Set up channel logging for the network
        self.log_setup()
2016-01-23 22:13:38 +01:00
2017-06-16 06:31:03 +02:00
def __repr__ ( self ) :
2017-07-05 08:32:30 +02:00
return " < %s object for network %r > " % ( self . __class__ . __name__ , self . name )
2016-07-29 06:34:00 +02:00
2017-08-07 06:49:41 +02:00
    ## Stubs
    def validate_server_conf(self):
        """Validates the server block for this network; no-op by default."""
        return

    def connect(self):
        """Connects to the network; must be implemented by protocol modules."""
        raise NotImplementedError

    def disconnect(self):
        """Disconnects from the network; must be implemented by protocol modules."""
        raise NotImplementedError
2017-08-07 06:49:41 +02:00
    ## General utility functions
    def call_hooks(self, hook_args):
        """Calls a hook function with the given hook args."""
        numeric, command, parsed_args = hook_args
        # Always make sure TS is sent.
        if 'ts' not in parsed_args:
            parsed_args['ts'] = int(time.time())
        hook_cmd = command
        hook_map = self.hook_map

        # If the hook name is present in the protocol module's hook_map, then we
        # should set the hook name to the name that points to instead.
        # For example, plugins will read SETHOST as CHGHOST, EOS (end of sync)
        # as ENDBURST, etc.
        if command in hook_map:
            hook_cmd = hook_map[command]

        # However, individual handlers can also return a 'parse_as' key to send
        # their payload to a different hook. An example of this is "/join 0"
        # being interpreted as leaving all channels (PART).
        hook_cmd = parsed_args.get('parse_as') or hook_cmd

        log.debug('(%s) Raw hook data: [%r, %r, %r] received from %s handler '
                  '(calling hook %s)', self.name, numeric, hook_cmd, parsed_args,
                  command, hook_cmd)

        # Iterate over registered hook functions, catching errors accordingly.
        # A copy is iterated so hooks (un)registered mid-loop don't break iteration.
        for hook_pair in world.hooks[hook_cmd].copy():
            hook_func = hook_pair[1]
            try:
                log.debug('(%s) Calling hook function %s from plugin "%s"', self.name,
                          hook_func, hook_func.__module__)
                retcode = hook_func(self, numeric, command, parsed_args)

                # A hook may return False to stop processing of further hooks.
                if retcode is False:
                    log.debug('(%s) Stopping hook loop for %r (command=%r)', self.name,
                              hook_func, command)
                    break
            except Exception:
                # We don't want plugins to crash our servers...
                log.exception('(%s) Unhandled exception caught in hook %r from plugin "%s"',
                              self.name, hook_func, hook_func.__module__)
                log.error('(%s) The offending hook data was: %s', self.name,
                          hook_args)
                continue
2015-12-07 02:40:13 +01:00
2017-06-16 06:31:03 +02:00
    def call_command(self, source, text):
        """
        Calls a PyLink bot command. source is the caller's UID, and text is the
        full, unparsed text of the message.
        """
        # Delegates to the main 'pylink' service bot's command dispatcher.
        world.services['pylink'].call_cmd(self, source, text)
2016-06-11 20:29:11 +02:00
2018-05-11 23:38:21 +02:00
    def msg(self, target, text, notice=None, source=None, loopback=True, wrap=True):
        """Handy function to send messages/notices to clients. Source
        is optional, and defaults to the main PyLink client if not specified."""
        if not text:
            return

        if not (source or self.pseudoclient):
            # No explicit source set and our main client wasn't available; abort.
            return
        source = source or self.pseudoclient.uid

        def _msg(text):
            # Sends one physical line as either a NOTICE or a PRIVMSG.
            if notice:
                self.notice(source, target, text)
                cmd = 'PYLINK_SELF_NOTICE'
            else:
                self.message(source, target, text)
                cmd = 'PYLINK_SELF_PRIVMSG'

            # Determines whether we should send a hook for this msg(), to forward things like services
            # replies across relay.
            if loopback:
                self.call_hooks([source, cmd, {'target': target, 'text': text}])

        # Optionally wrap the text output (one _msg per wrapped line).
        if wrap:
            for line in self.wrap_message(source, target, text):
                _msg(line)
        else:
            _msg(text)
2015-12-07 02:40:13 +01:00
2017-06-16 06:31:03 +02:00
    def _reply(self, text, notice=None, source=None, private=None, force_privmsg_in_private=False,
               loopback=True, wrap=True):
        """
        Core of the reply() function - replies to the last caller in the right context
        (channel or PM).
        """
        if private is None:
            # Allow using private replies as the default, if no explicit setting was given.
            private = conf.conf['pylink'].get("prefer_private_replies")

        # Private reply is enabled, or the caller was originally a PM
        if private or (self.called_in in self.users):
            if not force_privmsg_in_private:
                # For private replies, the default is to override the notice=True/False argument,
                # and send replies as notices regardless. This is standard behaviour for most
                # IRC services, but can be disabled if force_privmsg_in_private is given.
                notice = True
            target = self.called_by
        else:
            target = self.called_in

        self.msg(target, text, notice=notice, source=source, loopback=loopback, wrap=wrap)
2016-01-31 08:04:13 +01:00
2017-06-16 06:31:03 +02:00
    def reply(self, *args, **kwargs):
        """
        Replies to the last caller in the right context (channel or PM).

        This function wraps around _reply() and can be monkey-patched in a thread-safe manner
        to temporarily redirect plugin output to another target.
        """
        # The lock serializes replies so a monkey-patched reply() cannot race
        # with the original.
        with self._reply_lock:
            self._reply(*args, **kwargs)

    def error(self, text, **kwargs):
        """Replies with an error to the last caller in the right context (channel or PM)."""
        # This is a stub to alias error to reply
        self.reply("Error: %s" % text, **kwargs)
2016-08-22 01:46:57 +02:00
2017-08-07 06:49:41 +02:00
    ## Configuration-based lookup functions.
    def version(self):
        """
        Returns a detailed version string including the PyLink daemon version,
        the protocol module in use, and the server hostname.
        """
        fullversion = 'PyLink-%s. %s :[protocol:%s, encoding:%s]' % (__version__, self.hostname(), self.protoname, self.encoding)
        return fullversion

    def hostname(self):
        """
        Returns the server hostname used by PyLink on the given server.
        """
        return self.serverdata.get('hostname', world.fallback_hostname)
2016-07-29 06:49:16 +02:00
2017-06-16 06:31:03 +02:00
def get_full_network_name ( self ) :
"""
Returns the full network name ( as defined by the " netname " option ) , or the
short network name if that isn ' t defined.
"""
return self . serverdata . get ( ' netname ' , self . name )
2016-07-29 07:22:47 +02:00
2018-04-08 07:20:35 +02:00
    def get_service_option(self, servicename, option, default=None, global_option=None):
        """
        Returns the value of the requested service bot option on the current network, or the
        global value if it is not set for this network. This function queries and returns:

        1) If present, the value of the config option servers::<NETNAME>::<SERVICENAME>_<OPTION>
        2) If present, the value of the config option <SERVICENAME>::<GLOBAL_OPTION>, where
           <GLOBAL_OPTION> is either the 'global_option' keyword argument or <OPTION>.
        3) The default value given in the 'default' keyword argument.

        While service bot and config option names can technically be uppercase or mixed case,
        the convention is to define them in all lowercase characters.
        """
        netopt = self.serverdata.get('%s_%s' % (servicename, option))
        if netopt is not None:
            return netopt

        # Fall back to the global (per-service) option.
        if global_option is not None:
            option = global_option
        globalopt = conf.conf.get(servicename, {}).get(option)
        if globalopt is not None:
            return globalopt

        return default
2017-08-07 06:49:41 +02:00
2019-10-11 03:49:07 +02:00
    def get_service_options(self, servicename: str, option: str, itertype: type, global_option=None):
        """
        Returns a merged copy of the requested service bot option. This includes:

        1) If present, the value of the config option servers::<NETNAME>::<SERVICENAME>_<OPTION> (netopt)
        2) If present, the value of the config option <SERVICENAME>::<GLOBAL_OPTION>, where
           <GLOBAL_OPTION> is either the 'global_option' keyword value or <OPTION> (globalopt)

        For itertype, the following types are allowed:
        - list: items are combined as globalopt + netopt
        - dict: items are combined as {**globalopt, **netopt}
        """
        # 'or itertype()' normalizes missing/None values into an empty container.
        netopt = self.serverdata.get('%s_%s' % (servicename, option)) or itertype()
        globalopt = conf.conf.get(servicename, {}).get(global_option or option) or itertype()
        return utils.merge_iterables(globalopt, netopt)
2017-08-07 06:49:41 +02:00
def has_cap ( self , capab ) :
"""
Returns whether this protocol module instance has the requested capability .
"""
return capab . lower ( ) in self . protocol_caps
    ## Shared helper functions
    def _pre_connect(self):
        """
        Implements triggers called before a network connects.
        """
        # Reset abort flags and per-connection state before (re)connecting.
        self._aborted_send.clear()
        self._aborted.clear()
        self._init_vars()

        try:
            self.validate_server_conf()
        except Exception as e:
            log.error("(%s) Configuration error: %s", self.name, e)
            raise
    def _run_autoconnect(self):
        """Blocks for the autoconnect time and returns True if autoconnect is enabled."""
        if world.shutting_down.is_set():
            log.debug('(%s) _run_autoconnect: aborting autoconnect attempt since we are shutting down.', self.name)
            return

        autoconnect = self.serverdata.get('autoconnect')

        # Sets the autoconnect growth multiplier (e.g. a value of 2 multiplies the autoconnect
        # time by 2 on every failure, etc.)
        autoconnect_multiplier = self.serverdata.get('autoconnect_multiplier', 2)
        autoconnect_max = self.serverdata.get('autoconnect_max', 1800)
        # These values must at least be 1.
        autoconnect_multiplier = max(autoconnect_multiplier, 1)
        autoconnect_max = max(autoconnect_max, 1)

        log.debug('(%s) _run_autoconnect: Autoconnect delay set to %s seconds.', self.name, autoconnect)
        if autoconnect is not None and autoconnect >= 1:
            log.debug('(%s) _run_autoconnect: Multiplying autoconnect delay %s by %s.', self.name, autoconnect, self.autoconnect_active_multiplier)
            autoconnect *= self.autoconnect_active_multiplier
            # Add a cap on the max. autoconnect delay, so that we don't go on forever...
            autoconnect = min(autoconnect, autoconnect_max)

            log.info('(%s) _run_autoconnect: Going to auto-reconnect in %s seconds.', self.name, autoconnect)
            # Continue when either self._aborted is set or the autoconnect time passes.
            # Compared to time.sleep(), this allows us to break out of the wait
            # early if the network is being shut down.
            self._aborted.clear()
            self._aborted.wait(autoconnect)

            # Store in the local state what the autoconnect multiplier currently is.
            self.autoconnect_active_multiplier *= autoconnect_multiplier

            if self not in world.networkobjects.values():
                log.debug('(%s) _run_autoconnect: Stopping stale connect loop', self.name)
                return
            return True

        else:
            log.debug('(%s) _run_autoconnect: Stopping connect loop (autoconnect value %r is < 1).', self.name, autoconnect)
            return
    def _pre_disconnect(self):
        """
        Implements triggers called before a network disconnects.
        """
        self._aborted.set()
        # Record whether the connection ever became fully established; this is
        # passed to the PYLINK_DISCONNECT hook in _post_disconnect().
        self.was_successful = self.connected.is_set()
        log.debug('(%s) _pre_disconnect: got %s for was_successful state', self.name, self.was_successful)

        log.debug('(%s) _pre_disconnect: Clearing self.connected state.', self.name)
        self.connected.clear()

        log.debug('(%s) _pre_disconnect: Removing channel logging handlers due to disconnect.', self.name)
        while self.loghandlers:
            log.removeHandler(self.loghandlers.pop())

    def _post_disconnect(self):
        """
        Implements triggers called after a network disconnects.
        """
        # Internal hook signifying that a network has disconnected.
        self.call_hooks([None, 'PYLINK_DISCONNECT', {'was_successful': self.was_successful}])

        # Clear the to_lower cache.
        self.to_lower.cache_clear()
2017-06-28 01:21:30 +02:00
    def _remove_client(self, numeric):
        """
        Internal function to remove a client from our internal state.

        If the removal was successful, return the User object for the given numeric (UID)."""
        # Part the user from every channel; iterate a copy since channels may
        # be deleted from the index as we go.
        for c, v in self.channels.copy().items():
            v.remove_user(numeric)

            # Clear empty non-permanent channels.
            if not (self.channels[c].users or ((self.cmodes.get('permanent'), None) in self.channels[c].modes)):
                del self.channels[c]

        sid = self.get_server(numeric)
        try:
            userobj = self.users[numeric]
            del self.users[numeric]
            self.servers[sid].users.discard(numeric)
        except KeyError:
            log.debug('(%s) Skipping removing client %s that no longer exists', self.name, numeric,
                      exc_info=True)
        else:
            log.debug('(%s) Removing client %s from user + server state', self.name, numeric)
            return userobj
2017-06-25 08:27:24 +02:00
2017-08-07 06:49:41 +02:00
## State checking functions
2019-06-16 19:30:16 +02:00
def nick_to_uid ( self , nick , multi = False , filterfunc = None ) :
""" Looks up the UID of a user with the given nick, or return None if no such nick exists.
If multi is given , return all matches for nick instead of just the last result . ( Return an empty list if no matches )
If filterfunc is given , filter matched users by the given function first . """
2017-06-28 01:17:28 +02:00
nick = self . to_lower ( nick )
2018-03-24 08:10:00 +01:00
uids = self . users . bynick . get ( nick , [ ] )
2019-06-16 19:30:16 +02:00
if filterfunc :
uids = list ( filter ( filterfunc , uids ) )
if multi :
return uids
else :
if len ( uids ) > 1 :
log . warning ( ' ( %s ) Multiple UIDs found for nick %r : %r ; using the last one! ' , self . name , nick , uids )
try :
return uids [ - 1 ]
except IndexError :
return None
2017-06-28 01:17:28 +02:00
2019-06-16 19:31:23 +02:00
def is_internal_client ( self , uid ) :
2017-06-28 01:17:28 +02:00
"""
2019-06-16 19:31:23 +02:00
Returns whether the given UID is a PyLink client .
This returns False if the numeric doesn ' t exist.
2017-06-28 01:17:28 +02:00
"""
2019-06-16 19:31:23 +02:00
sid = self . get_server ( uid )
2017-06-28 01:17:28 +02:00
if sid and self . servers [ sid ] . internal :
return True
return False
def is_internal_server ( self , sid ) :
""" Returns whether the given SID is an internal PyLink server. """
return ( sid in self . servers and self . servers [ sid ] . internal )
2019-06-16 19:31:23 +02:00
def get_server ( self , uid ) :
""" Finds the ID of the server a user is on. Return None if the user does not exist. """
userobj = self . users . get ( uid )
2017-06-28 01:17:28 +02:00
if userobj :
return userobj . server
    def is_manipulatable_client(self, uid):
        """
        Returns whether the given client is marked manipulatable for interactions
        such as force-JOIN.
        """
        # Only internal clients can ever be manipulatable.
        return self.is_internal_client(uid) and self.users[uid].manipulatable
def get_service_bot ( self , uid ) :
"""
2019-06-16 19:31:23 +02:00
Checks whether the given UID exists and is a registered service bot .
If True , returns the corresponding ServiceBot object .
Otherwise , return False .
2017-06-28 01:17:28 +02:00
"""
userobj = self . users . get ( uid )
if not userobj :
return False
2017-08-31 04:29:26 +02:00
# Look for the "service" attribute in the User object,sname = userobj.service
# Warn if the service name we fetched isn't a registered service.
sname = userobj . service
2017-08-31 04:48:46 +02:00
if sname is not None and sname not in world . services . keys ( ) :
2017-08-31 04:29:26 +02:00
log . warning ( " ( %s ) User %s / %s had a service bot record to a service that doesn ' t "
" exist ( %s )! " , self . name , uid , userobj . nick , sname )
return world . services . get ( sname )
2017-06-25 08:27:24 +02:00
2017-08-26 02:05:53 +02:00
# Register the network core in the structures module's copy blacklist
# (NOTE(review): presumably this prevents these objects from being (deep-)copied,
# since they hold locks/sockets/threads — confirm semantics in structures.py).
structures._BLACKLISTED_COPY_TYPES.append(PyLinkNetworkCore)
2017-06-16 06:31:03 +02:00
class PyLinkNetworkCoreWithUtils ( PyLinkNetworkCore ) :
2017-07-07 23:33:00 +02:00
2017-06-28 01:12:45 +02:00
def __init__ ( self , * args , * * kwargs ) :
super ( ) . __init__ ( * args , * * kwargs )
# Lock for updateTS to make sure only one thread can change the channel TS at one time.
self . _ts_lock = threading . Lock ( )
2018-03-23 06:03:08 +01:00
@functools.lru_cache ( maxsize = 8192 )
def to_lower ( self , text ) :
2019-07-01 23:00:26 +02:00
"""
Returns the lowercase representation of text . This respects IRC casemappings defined by the protocol module .
"""
2019-02-07 22:49:38 +01:00
if ( not text ) or ( not isinstance ( text , str ) ) :
2018-02-24 20:18:18 +01:00
return text
2018-03-23 06:03:08 +01:00
if self . casemapping == ' rfc1459 ' :
2017-06-16 06:31:03 +02:00
text = text . replace ( ' { ' , ' [ ' )
text = text . replace ( ' } ' , ' ] ' )
text = text . replace ( ' | ' , ' \\ ' )
text = text . replace ( ' ~ ' , ' ^ ' )
# Encode the text as bytes first, and then lowercase it so that only ASCII characters are
2017-07-07 23:33:00 +02:00
# changed. Unicode in channel names, etc. *is* case sensitive!
2019-02-20 22:22:01 +01:00
# Interesting, a quick emperical test found that this method is actually faster than str.translate()?!
2017-06-16 06:31:03 +02:00
return text . encode ( ) . lower ( ) . decode ( )
2016-07-29 06:49:16 +02:00
2017-08-29 05:01:28 +02:00
_NICK_REGEX = r ' ^[A-Za-z \ | \\ _ \ [ \ ] \ { \ } \ ^ \ `][A-Z0-9a-z \ - \ | \\ _ \ [ \ ] \ { \ } \ ^ \ `]*$ '
@classmethod
def is_nick ( cls , s , nicklen = None ) :
2019-07-01 23:00:26 +02:00
"""
Returns whether the string given is a valid nick .
Other platforms SHOULD redefine this if their definition of a valid nick is different . """
2017-08-29 05:01:28 +02:00
if nicklen and len ( s ) > nicklen :
return False
return bool ( re . match ( cls . _NICK_REGEX , s ) )
@staticmethod
2019-07-01 23:00:26 +02:00
def is_channel ( obj ) :
"""
Returns whether the item given is a valid channel ( for a mapping key ) .
For IRC , this checks if the item ' s name starts with a " # " .
Other platforms SHOULD redefine this if they track channels by some other format ( e . g . numerical IDs ) .
"""
return str ( obj ) . startswith ( ' # ' )
2017-08-29 05:01:28 +02:00
2019-09-11 04:31:57 +02:00
# Modified from https://stackoverflow.com/a/106223 (RFC 1123):
# - Allow hostnames that end in '.'
# - Require at least one '.' in the hostname
_HOSTNAME_RE = re . compile ( r ' ^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9 \ -]*[a-zA-Z0-9]) \ .)+ '
r ' ([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9 \ -]*[A-Za-z0-9])*$ ' )
2017-08-29 05:01:28 +02:00
@classmethod
2019-09-11 04:31:57 +02:00
def is_server_name ( cls , text ) :
2019-07-01 23:00:26 +02:00
""" Returns whether the string given is a valid server name. """
2019-09-11 04:31:57 +02:00
return bool ( cls . _HOSTNAME_RE . match ( text ) )
2017-08-29 05:01:28 +02:00
_HOSTMASK_RE = re . compile ( r ' ^ \ S+! \ S+@ \ S+$ ' )
@classmethod
def is_hostmask ( cls , text ) :
2019-07-01 23:00:26 +02:00
"""
Returns whether the given text is a valid hostmask ( nick ! user @host )
Other protocols may redefine this to meet their definition of hostmask
( i . e . some unique identifier for a user ) .
"""
2017-08-29 05:01:28 +02:00
# Band-aid patch here to prevent bad bans set by Janus forwarding people into invalid channels.
return bool ( cls . _HOSTMASK_RE . match ( text ) and ' # ' not in text )
2019-02-12 08:27:04 +01:00
# TODO: these wrappers really need to be standardized
def _get_SID ( self , sname ) :
""" Returns the SID of a server with the given name, if present. """
name = sname . lower ( )
if name in self . servers :
return name
for k , v in self . servers . items ( ) :
if v . name . lower ( ) == name :
return k
else :
return sname # Fall back to given text instead of None
def _get_UID ( self , target ) :
2019-06-24 04:45:29 +02:00
"""
2019-07-01 23:00:26 +02:00
Converts a nick argument to its matching UID . This differs from nick_to_uid ( )
in that it returns the original text instead of None if no matching nick is found .
2019-06-24 04:45:29 +02:00
Subclasses like Clientbot may override this to tweak the nick lookup behaviour ,
e . g . by filtering virtual clients out .
"""
2019-02-12 08:27:04 +01:00
if target in self . users :
return target
target = self . nick_to_uid ( target ) or target
return target
    def _squit(self, numeric, command, args):
        """Handles incoming SQUITs (server splits).

        numeric and command mirror the generic command-handler signature; they are
        unused here. args[0] is the SID/name of the split server and args[-1] the
        split reason.

        Returns a dict describing the split: 'target' (split SID), 'users'
        (all removed UIDs, including those on leaf servers), 'name', 'uplink',
        'serverdata' (the removed Server object), 'nicks' (channel -> list of
        affected nicks), 'channeldata' (pre-split channel state copy) and
        'affected_servers' (every SID removed).
        """
        split_server = self._get_SID(args[0])

        # Normally we'd only need to check for our SID as the SQUIT target, but Nefarious
        # actually uses the uplink server as the SQUIT target.
        # <- ABAAE SQ nefarious.midnight.vpn 0 :test
        if split_server in (self.sid, self.uplink):
            raise ProtocolError('SQUIT received: (reason: %s)' % args[-1])

        affected_users = []
        affected_servers = [split_server]
        affected_nicks = collections.defaultdict(list)
        log.debug('(%s) Splitting server %s (reason: %s)', self.name, split_server, args[-1])

        if split_server not in self.servers:
            log.warning("(%s) Tried to split a server (%s) that didn't exist!", self.name, split_server)
            return

        # Prevent RuntimeError: dictionary changed size during iteration
        old_servers = self.servers.copy()
        old_channels = self._channels.copy()

        # Cycle through our list of servers. If any server's uplink is the one that is being SQUIT,
        # remove them and all their users too.
        for sid, data in old_servers.items():
            if data.uplink == split_server:
                log.debug('Server %s also hosts server %s, removing those users too...', split_server, sid)
                # Recursively run SQUIT on any other hubs this server may have been connected to.
                # NOTE: this rebinds the local name "args" to the recursive result dict;
                # args[-1] must not be used after this point.
                args = self._squit(sid, 'SQUIT', [sid, "0",
                                   "PyLink: Automatically splitting leaf servers of %s" % sid])
                affected_users += args['users']
                affected_servers += args['affected_servers']

        for user in self.servers[split_server].users.copy():
            affected_users.append(user)
            nick = self.users[user].nick

            # Nicks affected is channel specific for SQUIT:. This makes Clientbot's SQUIT relaying
            # much easier to implement.
            for name, cdata in old_channels.items():
                if user in cdata.users:
                    affected_nicks[name].append(nick)

            log.debug('Removing client %s (%s)', user, nick)
            self._remove_client(user)

        # Capture the server's details before deleting it from the index.
        serverdata = self.servers[split_server]
        sname = serverdata.name
        uplink = serverdata.uplink
        del self.servers[split_server]
        log.debug('(%s) Netsplit affected users: %s', self.name, affected_users)

        return {'target': split_server, 'users': affected_users, 'name': sname,
                'uplink': uplink, 'nicks': affected_nicks, 'serverdata': serverdata,
                'channeldata': old_channels, 'affected_servers': affected_servers}
2019-12-23 07:02:07 +01:00
@staticmethod
def _log_debug_modes ( * args , * * kwargs ) :
"""
Log debug info related to mode parsing if enabled .
"""
if conf . conf [ ' pylink ' ] . get ( ' log_mode_parsers ' ) :
log . debug ( * args , * * kwargs )
2018-06-09 03:45:20 +02:00
    def _parse_modes(self, args, existing, supported_modes, is_channel=False, prefixmodes=None,
                     ignore_missing_args=False):
        """
        parse_modes() core.

        args: A mode string or a mode string split by space (type list)
        existing: A set or iterable of existing modes
        supported_modes: a dict of PyLink supported modes (mode names mapping
                         to mode chars, with *ABCD keys)
        is_channel: whether the target is a channel (passed through to _apply_modes)
        prefixmodes: a dict of prefix modes (irc.prefixmodes style)
        ignore_missing_args: if set, missing mode arguments are logged at debug
                             level instead of as warnings

        Returns a list of ('+x'/'-x', argument-or-None) tuples.
        """
        prefix = ''
        if isinstance(args, str):
            # If the modestring was given as a string, split it into a list.
            args = args.split()

        assert args, 'No valid modes were supplied!'
        modestring = args[0]
        args = args[1:]

        existing = set(existing)

        # Map (mode char, casefolded argument) -> the original modepair, so that
        # removals of parameter modes can be matched case insensitively.
        existing_casemap = {}
        for modepair in existing:
            arg = modepair[1]
            if arg is not None:
                existing_casemap[(modepair[0], self.to_lower(arg))] = modepair
            else:
                existing_casemap[modepair] = modepair

        res = []
        for mode in modestring:
            if mode in '+-':
                prefix = mode
            else:
                if not prefix:
                    # No +/- given before the first mode char; assume add.
                    prefix = '+'
                arg = None
                self._log_debug_modes('Current mode: %s%s; args left: %s', prefix, mode, args)
                try:
                    if prefixmodes and mode in self.prefixmodes:
                        # We're setting a prefix mode on someone (e.g. +o user1)
                        self._log_debug_modes('Mode %s: This mode is a prefix mode.', mode)
                        arg = args.pop(0)
                        # Convert nicks to UIDs implicitly
                        arg = self._get_UID(arg)
                        if arg not in self.users:  # Target doesn't exist, skip it.
                            self._log_debug_modes('(%s) Skipping setting mode "%s %s"; the '
                                                  'target doesn\'t seem to exist!', self.name,
                                                  mode, arg)
                            continue
                    elif mode in (supported_modes['*A'] + supported_modes['*B']):
                        # Must have parameter.
                        self._log_debug_modes('Mode %s: This mode must have parameter.', mode)
                        arg = args.pop(0)
                        if prefix == '-':
                            if mode in supported_modes['*B'] and arg == '*':
                                # Charybdis allows unsetting +k without actually
                                # knowing the key by faking the argument when unsetting
                                # as a single "*".
                                # We'd need to know the real argument of +k for us to
                                # be able to unset the mode.
                                oldarg = dict(existing).get(mode)
                                if oldarg:
                                    # Set the arg to the old one on the channel.
                                    arg = oldarg
                                    self._log_debug_modes("Mode %s: coersing argument of '*' to %r.", mode, arg)

                            self._log_debug_modes('(%s) parse_modes: checking if +%s %s is in old modes list: %s; existing_casemap=%s', self.name, mode, arg, existing, existing_casemap)

                            arg = self.to_lower(arg)
                            casefolded_modepair = existing_casemap.get((mode, arg))  # Case fold arguments as needed
                            if casefolded_modepair not in existing:
                                # Ignore attempts to unset parameter modes that don't exist.
                                self._log_debug_modes("(%s) parse_modes: ignoring removal of non-existent list mode +%s %s; casefolded_modepair=%s", self.name, mode, arg, casefolded_modepair)
                                continue

                            # Use the argument exactly as it was stored, not the casefolded form.
                            arg = casefolded_modepair[1]

                    elif prefix == '+' and mode in supported_modes['*C']:
                        # Only has parameter when setting.
                        self._log_debug_modes('Mode %s: Only has parameter when setting.', mode)
                        arg = args.pop(0)
                except IndexError:
                    # args.pop(0) above ran out of arguments for a mode that needs one.
                    logfunc = self._log_debug_modes if ignore_missing_args else log.warning
                    logfunc('(%s) Error while parsing mode %r: mode requires an '
                            'argument but none was found. (modestring: %r)',
                            self.name, mode, modestring)
                    continue  # Skip this mode; don't error out completely.

                newmode = (prefix + mode, arg)
                res.append(newmode)

                # Tentatively apply the new mode to the "existing" mode list. This is so queries
                # like +b-b *!*@example.com *!*@example.com behave correctly
                # (we can't rely on the original mode list to check whether a mode currently exists)
                existing = self._apply_modes(existing, [newmode], is_channel=is_channel)

                # Keep the casefold index in sync with the tentative mode list.
                lowered_mode = (newmode[0][-1], self.to_lower(newmode[1]) if newmode[1] else newmode[1])
                if prefix == '+' and lowered_mode not in existing_casemap:
                    existing_casemap[lowered_mode] = (mode, arg)
                elif prefix == '-' and lowered_mode in existing_casemap:
                    del existing_casemap[lowered_mode]
        return res
2016-01-10 04:38:27 +01:00
2018-06-09 03:45:20 +02:00
def parse_modes ( self , target , args , ignore_missing_args = False ) :
2018-02-11 02:28:04 +01:00
""" Parses a modestring list into a list of (mode, argument) tuples.
[ ' +mitl-o ' , ' 3 ' , ' person ' ] = > [ ( ' +m ' , None ) , ( ' +i ' , None ) , ( ' +t ' , None ) , ( ' +l ' , ' 3 ' ) , ( ' -o ' , ' person ' ) ]
"""
# http://www.irc.org/tech_docs/005.html
# A = Mode that adds or removes a nick or address to a list. Always has a parameter.
# B = Mode that changes a setting and always has a parameter.
# C = Mode that changes a setting and only has a parameter when set.
# D = Mode that changes a setting and never has a parameter.
2018-03-03 05:56:59 +01:00
is_channel = self . is_channel ( target )
if not is_channel :
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( ' ( %s ) Using self.umodes for this query: %s ' , self . name , self . umodes )
2018-02-11 02:28:04 +01:00
if target not in self . users :
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( ' ( %s ) Possible desync! Mode target %s is not in the users index. ' , self . name , target )
2018-02-11 02:28:04 +01:00
return [ ] # Return an empty mode list
supported_modes = self . umodes
oldmodes = self . users [ target ] . modes
2018-03-03 06:07:47 +01:00
prefixmodes = None
2018-02-11 02:28:04 +01:00
else :
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( ' ( %s ) Using self.cmodes for this query: %s ' , self . name , self . cmodes )
2018-02-11 02:28:04 +01:00
supported_modes = self . cmodes
oldmodes = self . _channels [ target ] . modes
2018-03-03 06:07:47 +01:00
prefixmodes = self . _channels [ target ] . prefixmodes
2018-02-11 02:28:04 +01:00
2018-03-03 05:56:59 +01:00
return self . _parse_modes ( args , oldmodes , supported_modes , is_channel = is_channel ,
2018-06-09 03:45:20 +02:00
prefixmodes = prefixmodes , ignore_missing_args = ignore_missing_args )
2018-02-11 02:28:04 +01:00
2018-03-03 05:43:05 +01:00
    def _apply_modes(self, old_modelist, changedmodes, is_channel=False,
                     prefixmodes=None):
        """
        Takes a list of parsed IRC modes, and applies them onto the given target mode list.

        old_modelist: an iterable of (mode char, argument) pairs (the current state).
        changedmodes: parse_modes()-style list of ('+x'/'-x', argument) pairs.
        is_channel: selects self.cmodes vs. self.umodes as the supported mode table.
        prefixmodes: if given (channels only), a prefix-mode name -> user set
                     mapping that is updated IN PLACE for prefix mode changes.

        Returns the new mode list as a set of (mode char, argument) pairs.
        """
        modelist = set(old_modelist)

        # Index of mode char -> set of current argument values, so repeated
        # set/unset operations don't need to rescan the whole mode list.
        mapping = collections.defaultdict(set)

        if is_channel:
            supported_modes = self.cmodes
        else:
            supported_modes = self.umodes

        for modepair in modelist:  # Make a mapping of mode chars to values
            mapping[modepair[0]].add(modepair[1])

        for mode in changedmodes:
            # Chop off the +/- part that parse_modes gives; it's meaningless for a mode list.
            try:
                real_mode = (mode[0][1], mode[1])
            except IndexError:
                real_mode = mode

            if is_channel:
                if prefixmodes is not None:
                    # We only handle +qaohv for now. Iterate over every supported mode:
                    # if the IRCd supports this mode and it is the one being set, add/remove
                    # the person from the corresponding prefix mode list (e.g. c.prefixmodes['op']
                    # for ops).
                    for pmode, pmodelist in prefixmodes.items():
                        if pmode in supported_modes and real_mode[0] == supported_modes[pmode]:
                            if mode[0][0] == '+':
                                pmodelist.add(mode[1])
                            else:
                                pmodelist.discard(mode[1])

                if real_mode[0] in self.prefixmodes:
                    # Don't add prefix modes to Channel.modes; they belong in the
                    # prefixmodes mapping handled above.
                    self._log_debug_modes('(%s) Not adding mode %s to Channel.modes because '
                                          'it\'s a prefix mode.', self.name, str(mode))
                    continue

            if mode[0][0] != '-':  # Adding a mode; assume add if no explicit +/- is given
                self._log_debug_modes('(%s) Adding mode %r on %s', self.name, real_mode, modelist)
                existing = mapping.get(real_mode[0])
                if existing and real_mode[0] not in supported_modes['*A']:
                    # The mode we're setting takes a parameter, but is not a list mode (like +beI).
                    # Therefore, only one version of it can exist at a time, and we must remove
                    # any old modepairs using the same letter. Otherwise, we'll get duplicates when,
                    # for example, someone sets mode "+l 30" on a channel already set "+l 25".
                    self._log_debug_modes('(%s) Old modes for mode %r exist in %s, removing them: %s',
                                          self.name, real_mode, modelist, str(existing))
                    # Drain the argument set, discarding each stale pair from the mode list.
                    while existing:
                        oldvalue = existing.pop()
                        modelist.discard((real_mode[0], oldvalue))

                modelist.add(real_mode)
                mapping[real_mode[0]].add(real_mode[1])
            else:  # Removing a mode
                self._log_debug_modes('(%s) Removing mode %r from %s', self.name, real_mode, modelist)

                existing = mapping.get(real_mode[0])
                arg = real_mode[1]

                # Mode requires argument for removal (case insensitive)
                if real_mode[0] in (supported_modes['*A'] + supported_modes['*B']):
                    modelist.discard((real_mode[0], self.to_lower(arg)))
                # Mode does not require argument for removal - remove all modes entries with the same character
                else:
                    while existing:
                        oldvalue = existing.pop()
                        if arg is None or self.to_lower(arg) == self.to_lower(oldvalue):
                            modelist.discard((real_mode[0], oldvalue))

        self._log_debug_modes('(%s) Final modelist: %s', self.name, modelist)
        return modelist
def apply_modes ( self , target , changedmodes ) :
""" Takes a list of parsed IRC modes, and applies them on the given target.
The target can be either a channel or a user ; this is handled automatically . """
is_channel = self . is_channel ( target )
prefixmodes = None
2017-06-16 06:31:03 +02:00
try :
2018-03-03 05:43:05 +01:00
if is_channel :
c = self . _channels [ target ]
old_modelist = c . modes
prefixmodes = c . prefixmodes
2017-06-16 06:31:03 +02:00
else :
2018-03-03 05:43:05 +01:00
old_modelist = self . users [ target ] . modes
except KeyError :
log . warning ( ' ( %s ) Possible desync? Mode target %s is unknown. ' , self . name , target )
return
modelist = self . _apply_modes ( old_modelist , changedmodes , is_channel = is_channel ,
prefixmodes = prefixmodes )
try :
if is_channel :
2017-08-25 11:11:48 +02:00
self . _channels [ target ] . modes = modelist
2018-03-03 05:43:05 +01:00
else :
self . users [ target ] . modes = modelist
2017-06-16 06:31:03 +02:00
except KeyError :
2018-03-03 05:43:05 +01:00
log . warning ( " ( %s ) Invalid MODE target %s (is_channel= %s ) " , self . name , target , is_channel )
2017-02-26 07:06:43 +01:00
2017-06-16 06:31:03 +02:00
@staticmethod
def _flip ( mode ) :
""" Flips a mode character. """
2017-11-04 07:40:11 +01:00
# Make it a list first; strings don't support item assignment
2017-06-16 06:31:03 +02:00
mode = list ( mode )
if mode [ 0 ] == ' - ' : # Query is something like "-n"
mode [ 0 ] = ' + ' # Change it to "+n"
elif mode [ 0 ] == ' + ' :
mode [ 0 ] = ' - '
else : # No prefix given, assume +
mode . insert ( 0 , ' - ' )
return ' ' . join ( mode )
2016-06-15 19:55:47 +02:00
2017-06-16 06:31:03 +02:00
def reverse_modes ( self , target , modes , oldobj = None ) :
2019-07-01 23:00:26 +02:00
"""
IRC specific : Reverses / inverts the mode string or mode list given .
2016-01-10 05:24:46 +01:00
2017-06-16 06:31:03 +02:00
Optionally , an oldobj argument can be given to look at an earlier state of
a channel / user object , e . g . for checking the op status of a mode setter
before their modes are processed and added to the channel state .
2016-01-10 05:24:46 +01:00
2017-06-16 06:31:03 +02:00
This function allows both mode strings or mode lists . Example uses :
" +mi-lk test => " - mi + lk test "
" mi-k test => " - mi + k test "
[ ( ' +m ' , None ) , ( ' +r ' , None ) , ( ' +l ' , ' 3 ' ) , ( ' -o ' , ' person ' )
2019-08-26 22:16:52 +02:00
= > [ ( ' -m ' , None ) , ( ' -r ' , None ) , ( ' -l ' , None ) , ( ' +o ' , ' person ' ) } ]
{ ( ' s ' , None ) , ( ' +o ' , ' whoever ' ) = > [ ( ' -s ' , None ) , ( ' -o ' , ' whoever ' ) } ]
2017-06-16 06:31:03 +02:00
"""
2017-07-12 23:29:34 +02:00
origstring = isinstance ( modes , str )
2017-07-14 14:50:07 +02:00
2017-06-16 06:31:03 +02:00
# If the query is a string, we have to parse it first.
2017-07-12 23:29:34 +02:00
if origstring :
2017-06-16 06:31:03 +02:00
modes = self . parse_modes ( target , modes . split ( " " ) )
2019-08-26 23:46:17 +02:00
2017-06-16 06:31:03 +02:00
# Get the current mode list first.
2017-08-29 05:13:25 +02:00
if self . is_channel ( target ) :
2017-08-25 11:11:48 +02:00
c = oldobj or self . _channels [ target ]
2017-06-16 06:31:03 +02:00
oldmodes = c . modes . copy ( )
possible_modes = self . cmodes . copy ( )
# For channels, this also includes the list of prefix modes.
possible_modes [ ' *A ' ] + = ' ' . join ( self . prefixmodes )
for name , userlist in c . prefixmodes . items ( ) :
try :
2019-08-26 23:46:06 +02:00
# Add prefix modes to the list of old modes
oldmodes | = { ( self . cmodes [ name ] , u ) for u in userlist }
2017-06-16 06:31:03 +02:00
except KeyError :
continue
else :
2019-08-26 23:46:06 +02:00
oldmodes = set ( self . users [ target ] . modes )
2017-06-16 06:31:03 +02:00
possible_modes = self . umodes
2019-08-26 23:46:06 +02:00
oldmodes_mapping = dict ( oldmodes )
oldmodes_lower = { ( modepair [ 0 ] , self . to_lower ( modepair [ 1 ] ) if modepair [ 1 ] else modepair [ 1 ] )
for modepair in oldmodes }
2017-06-16 06:31:03 +02:00
newmodes = [ ]
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( ' ( %s ) reverse_modes: old/current mode list for %s is: %s ' , self . name ,
target , oldmodes )
2017-06-16 06:31:03 +02:00
for char , arg in modes :
# Mode types:
# A = Mode that adds or removes a nick or address to a list. Always has a parameter.
# B = Mode that changes a setting and always has a parameter.
# C = Mode that changes a setting and only has a parameter when set.
# D = Mode that changes a setting and never has a parameter.
mchar = char [ - 1 ]
if mchar in possible_modes [ ' *B ' ] + possible_modes [ ' *C ' ] :
2017-11-04 07:40:11 +01:00
# We need to look at the current mode list to reset modes that take arguments
# For example, trying to bounce +l 30 on a channel that had +l 50 set should
# give "+l 50" and not "-l".
2019-08-26 23:46:06 +02:00
oldarg = oldmodes_mapping . get ( mchar )
2017-06-16 06:31:03 +02:00
if oldarg : # Old mode argument for this mode existed, use that.
2019-08-26 23:46:06 +02:00
mpair = ( ' + %s ' % mchar , oldarg )
2017-06-16 06:31:03 +02:00
else : # Not found, flip the mode then.
2019-08-26 23:46:06 +02:00
2017-06-16 06:31:03 +02:00
# Mode takes no arguments when unsetting.
if mchar in possible_modes [ ' *C ' ] and char [ 0 ] != ' - ' :
arg = None
mpair = ( self . _flip ( char ) , arg )
else :
mpair = ( self . _flip ( char ) , arg )
2019-08-26 23:46:06 +02:00
if arg is not None :
arg = self . to_lower ( arg )
2017-06-16 06:31:03 +02:00
if char [ 0 ] != ' - ' and ( mchar , arg ) in oldmodes :
# Mode is already set.
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( " ( %s ) reverse_modes: skipping reversing ' %s %s ' with %s since we ' re "
" setting a mode that ' s already set. " , self . name , char , arg , mpair )
2017-06-16 06:31:03 +02:00
continue
elif char [ 0 ] == ' - ' and ( mchar , arg ) not in oldmodes and mchar in possible_modes [ ' *A ' ] :
2019-08-26 23:46:06 +02:00
# We're unsetting a list or prefix mode that was never set - don't set it in response!
2017-11-04 07:40:11 +01:00
# TS6 IRCds lacks server-side verification for this and can cause annoying mode floods.
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( " ( %s ) reverse_modes: skipping reversing ' %s %s ' with %s since it "
" wasn ' t previously set. " , self . name , char , arg , mpair )
2017-06-16 06:31:03 +02:00
continue
2019-08-26 23:46:17 +02:00
elif char [ 0 ] == ' - ' and mchar not in oldmodes_mapping :
# Check the same for regular modes that previously didn't exist
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( " ( %s ) reverse_modes: skipping reversing ' %s %s ' with %s since it "
" wasn ' t previously set. " , self . name , char , arg , mpair )
2019-08-26 23:46:17 +02:00
continue
2019-08-27 01:07:29 +02:00
elif mpair in newmodes :
# Check the same for regular modes that previously didn't exist
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( " ( %s ) reverse_modes: skipping duplicate reverse mode %s " , self . name , mpair )
2019-08-27 01:07:29 +02:00
continue
2017-06-16 06:31:03 +02:00
newmodes . append ( mpair )
2017-05-27 10:27:09 +02:00
2019-12-23 07:02:07 +01:00
self . _log_debug_modes ( ' ( %s ) reverse_modes: new modes: %s ' , self . name , newmodes )
2017-07-12 23:29:34 +02:00
if origstring :
2017-06-16 06:31:03 +02:00
# If the original query is a string, send it back as a string.
return self . join_modes ( newmodes )
else :
2019-08-26 22:16:52 +02:00
return newmodes
2015-09-19 19:31:43 +02:00
2017-06-16 06:31:03 +02:00
@staticmethod
def join_modes ( modes , sort = False ) :
2019-07-01 23:00:26 +02:00
"""
IRC specific : Takes a list of ( mode , arg ) tuples in parse_modes ( ) format , and
2017-06-16 06:31:03 +02:00
joins them into a string .
2019-07-01 23:00:26 +02:00
"""
2017-06-16 06:31:03 +02:00
prefix = ' + ' # Assume we're adding modes unless told otherwise
modelist = ' '
args = [ ]
2015-12-22 19:46:34 +01:00
2017-06-16 06:31:03 +02:00
# Sort modes alphabetically like a conventional IRCd.
if sort :
modes = sorted ( modes )
2015-12-27 01:43:40 +01:00
2017-06-16 06:31:03 +02:00
for modepair in modes :
mode , arg = modepair
assert len ( mode ) in ( 1 , 2 ) , " Incorrect length of a mode (received %r ) " % mode
try :
# If the mode has a prefix, use that.
curr_prefix , mode = mode
except ValueError :
2017-11-04 07:40:11 +01:00
# If not, the current prefix stays the same as the last mode pair; move on
# to the next one.
2017-06-16 06:31:03 +02:00
pass
else :
2017-11-04 07:40:11 +01:00
# Only when the prefix of this mode isn't the same as the last one do we add
# the prefix to the mode string. This prevents '+nt-lk' from turning
2017-06-16 06:31:03 +02:00
# into '+n+t-l-k' or '+ntlk'.
if prefix != curr_prefix :
modelist + = curr_prefix
prefix = curr_prefix
modelist + = mode
if arg is not None :
args . append ( arg )
if not modelist . startswith ( ( ' + ' , ' - ' ) ) :
# Our starting mode didn't have a prefix with it. Assume '+'.
modelist = ' + ' + modelist
if args :
# Add the args if there are any.
2019-02-07 23:55:27 +01:00
modelist + = ' '
modelist + = ' ' . join ( ( str ( arg ) for arg in args ) )
2017-06-16 06:31:03 +02:00
return modelist
2015-12-27 01:43:40 +01:00
2017-06-16 06:31:03 +02:00
    @classmethod
    def wrap_modes(cls, modes, limit, max_modes_per_msg=0):
        """
        IRC specific: Takes a list of modes and wraps it across multiple lines.

        modes: a parse_modes()-style list of (mode, argument) pairs.
        limit: the maximum length of each resulting mode string.
        max_modes_per_msg: if nonzero, also caps how many modes go on one line.

        Returns a list of join_modes()-formatted strings.
        """
        strings = []

        # This process is slightly trickier than just wrapping arguments, because modes create
        # positional arguments that can't be separated from its character.
        queued_modes = []
        total_length = 0

        last_prefix = '+'
        orig_modes = modes.copy()
        modes = list(modes)
        while modes:
            # PyLink mode lists come in the form [('+t', None), ('-b', '*!*@someone'), ('+l', 3)]
            # The +/- part is optional and is treated as the prefix of the last mode if not given,
            # or + (adding modes) if it is the first mode in the list.
            next_mode = modes.pop(0)

            modechar, arg = next_mode
            prefix = modechar[0]
            if prefix not in '+-':
                prefix = last_prefix
                # Explicitly add the prefix to the mode character to prevent
                # ambiguity when passing it to e.g. join_modes().
                modechar = prefix + modechar
                # XXX: because tuples are immutable, we have to replace the entire modepair...
                next_mode = (modechar, arg)

            # Figure out the length that the next mode will add to the buffer. If we're changing
            # from + to - (setting to removing modes) or vice versa, we'll need two characters:
            # the "+" or "-" as well as the actual mode char.
            next_length = 1
            if prefix != last_prefix:
                next_length += 1

            # Replace the last mode prefix with the current one for the next iteration.
            last_prefix = prefix

            if arg:
                # This mode has an argument, so add the length of that and a space.
                next_length += 1
                next_length += len(arg)

            # A single mode longer than the limit can never be wrapped; fail loudly.
            assert next_length <= limit, \
                "wrap_modes: Mode %s is too long for the given length %s" % (next_mode, limit)

            # Check both message length and max. modes per msg if enabled.
            if (next_length + total_length) <= limit and ((not max_modes_per_msg) or len(queued_modes) < max_modes_per_msg):
                # We can fit this mode in the next message; add it.
                total_length += next_length
                cls._log_debug_modes('wrap_modes: Adding mode %s to queued modes', str(next_mode))
                queued_modes.append(next_mode)
                cls._log_debug_modes('wrap_modes: queued modes: %s', queued_modes)
            else:
                # Otherwise, create a new message by joining the previous queued modes into a message.
                # Then, create a new message with our current mode.
                strings.append(cls.join_modes(queued_modes))
                queued_modes.clear()

                cls._log_debug_modes('wrap_modes: cleared queue (length %s) and now adding %s', limit, str(next_mode))
                queued_modes.append(next_mode)
                total_length = next_length
        else:
            # while-else: this always runs when the loop drains (there is no break),
            # flushing whatever is left in the queue as the final message.
            strings.append(cls.join_modes(queued_modes))

        cls._log_debug_modes('wrap_modes: returning %s for %s', strings, orig_modes)
        return strings
2016-01-10 04:38:27 +01:00
2017-06-16 06:31:03 +02:00
def get_hostmask ( self , user , realhost = False , ip = False ) :
"""
2019-07-01 23:00:26 +02:00
Returns a representative hostmask / user friendly identifier for a user .
On IRC , this is nick ! user @host ; other platforms may choose to define a different
style for user hostmasks .
If the realhost option is given , prefer showing the real host of the user instead
of the displayed host .
If the ip option is given , prefering showing the IP address of the user ( this overrides
2017-06-16 06:31:03 +02:00
realhost ) . """
userobj = self . users . get ( user )
2016-01-01 02:28:47 +01:00
2017-06-16 06:31:03 +02:00
try :
nick = userobj . nick
except AttributeError :
nick = ' <unknown-nick> '
2016-08-27 18:50:53 +02:00
2017-06-16 06:31:03 +02:00
try :
ident = userobj . ident
except AttributeError :
ident = ' <unknown-ident> '
2016-08-27 18:50:53 +02:00
2017-06-16 06:31:03 +02:00
try :
if ip :
host = userobj . ip
elif realhost :
host = userobj . realhost
else :
host = userobj . host
except AttributeError :
host = ' <unknown-host> '
2016-01-01 02:28:47 +01:00
2017-06-16 06:31:03 +02:00
return ' %s ! %s @ %s ' % ( nick , ident , host )
2016-09-01 03:28:13 +02:00
2017-06-16 06:31:03 +02:00
def get_friendly_name ( self , entityid ) :
2017-04-01 01:25:28 +02:00
"""
2019-07-01 23:00:26 +02:00
Returns the display name of an entity :
For servers , this returns the server name given a SID .
For users , this returns a nick given the UID .
For channels , return the channel name ( returned as - is for IRC ) .
2017-04-01 01:25:28 +02:00
"""
2017-06-16 06:31:03 +02:00
if entityid in self . servers :
return self . servers [ entityid ] . name
elif entityid in self . users :
return self . users [ entityid ] . nick
2018-06-09 02:18:39 +02:00
# Return channels as-is. Remember to strip any STATUSMSG prefixes like from @#channel
elif self . is_channel ( entityid . lstrip ( ' ' . join ( self . prefixmodes . values ( ) ) ) ) :
2018-05-11 23:40:24 +02:00
return entityid
2017-04-01 01:25:28 +02:00
else :
2017-06-16 06:31:03 +02:00
raise KeyError ( " Unknown UID/SID %s " % entityid )
2016-07-01 03:22:45 +02:00
2018-05-26 09:14:04 +02:00
def is_privileged_service ( self , entityid ) :
"""
2019-07-01 23:00:26 +02:00
Returns whether the given UID and SID belongs to a privileged service .
For IRC , this reads the ' ulines ' option in the server configuration . Other platforms
may override this to suit their needs .
2018-05-26 09:14:04 +02:00
"""
ulines = self . serverdata . get ( ' ulines ' , [ ] )
if entityid in self . users :
sid = self . get_server ( entityid )
else :
sid = entityid
return self . get_friendly_name ( sid ) in ulines
2018-06-12 08:55:19 +02:00
def is_oper ( self , uid , * * kwargs ) :
2017-04-01 01:25:28 +02:00
"""
2019-07-01 23:00:26 +02:00
Returns whether the given user has operator / server administration status .
For IRC , this checks usermode + o . Other platforms may choose to define this another way .
2018-06-12 08:55:19 +02:00
The allowAuthed and allowOper keyword arguments are deprecated since PyLink 2.0 - alpha4 .
2017-04-01 01:25:28 +02:00
"""
2018-06-12 08:55:19 +02:00
if ' allowAuthed ' in kwargs or ' allowOper ' in kwargs :
log . warning ( ' ( %s ) is_oper: the " allowAuthed " and " allowOper " options are deprecated as '
' of PyLink 2.0-alpha4 and now imply False and True respectively. To check for '
' PyLink account status, instead check the User.account attribute directly. ' ,
self . name )
if uid in self . users and ( " o " , None ) in self . users [ uid ] . modes :
return True
2017-06-16 06:31:03 +02:00
return False
2016-05-01 01:57:38 +02:00
2017-06-16 06:31:03 +02:00
    def match_host(self, glob, target, ip=True, realhost=True):
        """
        Checks whether the given host or given UID's hostmask matches the given glob
        (nick!user@host for IRC). PyLink extended targets are also supported.

        If the target given is a UID, and the 'ip' or 'realhost' options are True, this will also
        match against the target's IP address and real host, respectively.

        This function respects IRC casemappings (rfc1459 and ascii). If the given target is a UID,
        and the 'ip' option is enabled, the host portion of the glob is also matched as a CIDR range.
        """
        # Allow queries like !$exttarget to invert the given match.
        invert = glob.startswith('!')
        if invert:
            glob = glob.lstrip('!')

        def match_host_core():
            """
            Core processor for match_host(), minus the inversion check.
            """
            # Work with variables in the match_host() scope, from
            # http://stackoverflow.com/a/8178808
            # (the branches below rewrite 'glob' in place before matching)
            nonlocal glob

            # Prepare a list of hosts to check against.
            if target in self.users:

                if not self.is_hostmask(glob):
                    for specialchar in '$:()':
                        # XXX: we should probably add proper rules on what's a valid account name
                        if specialchar in glob:
                            break
                    else:
                        # Implicitly convert matches for *sane* account names to "$pylinkacc:accountname".
                        log.debug('(%s) Using target $pylinkacc:%s instead of raw string %r', self.name, glob, glob)
                        glob = '$pylinkacc:' + glob

                if glob.startswith('$'):
                    # Exttargets start with $. Skip regular ban matching and find the matching ban handler.
                    glob = glob.lstrip('$')
                    exttargetname = glob.split(':', 1)[0]
                    handler = world.exttarget_handlers.get(exttargetname)

                    if handler:
                        # Handler exists. Return what it finds.
                        result = handler(self, glob, target)
                        log.debug('(%s) Got %s from exttarget %s in match_host() glob $%s for target %s',
                                  self.name, result, exttargetname, glob, target)
                        return result
                    else:
                        # Unknown exttargets never match.
                        log.debug('(%s) Unknown exttarget %s in match_host() glob $%s', self.name,
                                  exttargetname, glob)
                        return False

                # Displayed hostmask is always included in the candidate set.
                hosts = {self.get_hostmask(target)}

                if ip:
                    hosts.add(self.get_hostmask(target, ip=True))

                    # HACK: support CIDR hosts in the hosts portion
                    try:
                        header, cidrtarget = glob.split('@', 1)
                        # Try to parse the host portion as a CIDR range
                        network = ipaddress.ip_network(cidrtarget)

                        real_ip = self.users[target].ip
                        if ipaddress.ip_address(real_ip) in network:
                            # If the CIDR matches, hack around the host matcher by pretending that
                            # the lookup target was the IP and not the CIDR range!
                            glob = '@'.join((header, real_ip))
                            log.debug('(%s) Found matching CIDR %s for %s, replacing target glob with IP %s', self.name,
                                      cidrtarget, target, real_ip)
                    except ValueError:
                        # Not a valid CIDR/IP; fall through to plain glob matching.
                        pass

                if realhost:
                    hosts.add(self.get_hostmask(target, realhost=True))

            else:  # We were given a host, use that.
                hosts = [target]

            # Iterate over the hosts to match, since we may have multiple (check IP/real host)
            for host in hosts:
                if self.match_text(glob, host):
                    return True

            return False

        result = match_host_core()
        if invert:
            result = not result
        return result
2016-05-01 01:33:46 +02:00
2018-06-09 03:21:37 +02:00
def match_text ( self , glob , text ) :
"""
2019-07-01 23:00:26 +02:00
Returns whether the given glob matches the given text under the network ' s current case mapping.
2018-06-09 03:21:37 +02:00
"""
2019-06-21 21:25:34 +02:00
return utils . match_text ( glob , text , filterfunc = self . to_lower )
2018-06-09 03:21:37 +02:00
2017-08-07 02:55:43 +02:00
def match_all ( self , banmask , channel = None ) :
"""
Returns all users matching the target hostmask / exttarget . Users can also be filtered by channel .
"""
if channel :
banmask = " $and:( %s +$channel: %s ) " % ( banmask , channel )
for uid , userobj in self . users . copy ( ) . items ( ) :
if self . match_host ( banmask , uid ) and uid in self . users :
yield uid
2017-08-07 05:02:09 +02:00
def match_all_re ( self , re_mask , channel = None ) :
"""
Returns all users whose " nick!user@host [gecos] " mask matches the given regular expression . Users can also be filtered by channel .
"""
regexp = re . compile ( re_mask )
for uid , userobj in self . users . copy ( ) . items ( ) :
target = ' %s [ %s ] ' % ( self . get_hostmask ( uid ) , userobj . realname )
if regexp . fullmatch ( target ) and ( ( not channel ) or channel in userobj . channels ) :
yield uid
2018-10-20 21:34:11 +02:00
def make_channel_ban ( self , uid , ban_type = ' ban ' , ban_style = None ) :
2017-08-07 04:21:55 +02:00
""" Creates a hostmask-based ban for the given user.
Ban exceptions , invite exceptions quiets , and extbans are also supported by setting ban_type
to the appropriate PyLink named mode ( e . g . " ban " , " banexception " , " invex " , " quiet " , " ban_nonick " ) . """
assert uid in self . users , " Unknown user %s " % uid
# FIXME: verify that this is a valid mask.
# XXX: support slicing hosts so things like *!ident@*.isp.net are possible. This is actually
# more annoying to do than it appears because of vHosts using /, IPv6 addresses
# (cloaked and uncloaked), etc.
2019-07-01 23:00:26 +02:00
# TODO: make this not specific to IRC
2018-10-20 21:34:11 +02:00
ban_style = ban_style or self . serverdata . get ( ' ban_style ' ) or \
conf . conf [ ' pylink ' ] . get ( ' ban_style ' ) or ' *!*@$host '
2017-08-07 04:21:55 +02:00
template = string . Template ( ban_style )
2018-10-20 21:29:45 +02:00
banhost = template . safe_substitute ( self . users [ uid ] . get_fields ( ) )
2018-10-20 21:31:54 +02:00
if not self . is_hostmask ( banhost ) :
raise ValueError ( " Ban mask %r is not a valid hostmask! " % banhost )
2017-08-07 04:21:55 +02:00
if ban_type in self . cmodes :
return ( ' + %s ' % self . cmodes [ ban_type ] , banhost )
elif ban_type in self . extbans_acting : # Handle extbans, which are generally "+b prefix:banmask"
return ( ' + %s ' % self . cmodes [ ' ban ' ] , self . extbans_acting [ ban_type ] + banhost )
else :
raise ValueError ( " ban_type %r is not available on IRCd %r " % ( ban_type , self . protoname ) )
2017-06-25 08:27:24 +02:00
    def updateTS(self, sender, channel, their_ts, modes=None):
        """
        IRC specific: Merges modes of a channel given the remote TS and a list of modes.

        sender is the UID/SID the modes came from, and their_ts the channel timestamp the
        remote side advertised; modes is a list of parsed mode pairs to apply (may be empty).
        """
        # Okay, so the situation is that we have 6 possible TS/sender combinations:
        #                       | our TS lower | TS equal | their TS lower
        # mode origin is us     |   OVERWRITE  |   MERGE  |    IGNORE
        # mode origin is uplink |    IGNORE    |   MERGE  |   OVERWRITE
        if modes is None:
            modes = []

        def _clear():
            # Remote side won the TS comparison: drop all local channel modes and
            # prefix-mode statuses, keeping only those held by our internal clients.
            log.debug("(%s) Clearing local modes from channel %s due to TS change", self.name,
                      channel)
            self._channels[channel].modes.clear()
            for p in self._channels[channel].prefixmodes.values():
                for user in p.copy():
                    if not self.is_internal_client(user):
                        p.discard(user)

        def _apply():
            # Apply the incoming mode list (no-op when it's empty).
            if modes:
                log.debug("(%s) Applying modes on channel %s (TS ok)", self.name,
                          channel)
                self.apply_modes(channel, modes)

        # Use a lock so only one thread can change a channel's TS at once: this prevents race
        # conditions that would otherwise desync channel modes.
        with self._ts_lock:
            our_ts = self._channels[channel].ts
            assert isinstance(our_ts, int), "Wrong type for our_ts (expected int, got %s)" % type(our_ts)
            assert isinstance(their_ts, int), "Wrong type for their_ts (expected int, got %s)" % type(their_ts)

            # Check if we're the mode sender based on the UID / SID given.
            our_mode = self.is_internal_client(sender) or self.is_internal_server(sender)

            log.debug("(%s/%s) our_ts: %s; their_ts: %s; is the mode origin us? %s", self.name,
                      channel, our_ts, their_ts, our_mode)
            if their_ts == our_ts:
                log.debug("(%s/%s) remote TS of %s is equal to our %s; mode query %s",
                          self.name, channel, their_ts, our_ts, modes)
                # Their TS is equal to ours. Merge modes.
                _apply()
            elif (their_ts < our_ts):
                # Suspiciously low timestamps (pre-1993) are treated as bogus and not stored.
                if their_ts < 750000:
                    if their_ts != 0:  # Sometimes unreal sends SJOIN with 0, don't warn for those
                        if self.serverdata.get('ignore_ts_errors'):
                            log.debug('(%s) Silently ignoring bogus TS %s on channel %s', self.name, their_ts, channel)
                        else:
                            log.warning('(%s) Possible desync? Not setting bogus TS %s on channel %s', self.name, their_ts, channel)
                else:
                    log.debug('(%s) Resetting channel TS of %s from %s to %s (remote has lower TS)',
                              self.name, channel, our_ts, their_ts)
                    self._channels[channel].ts = their_ts
                # Remote TS was lower and we're receiving modes. Clear the modelist and apply theirs.
                _clear()
                _apply()
2017-07-31 14:58:02 +02:00
def _check_nick_collision ( self , nick ) :
"""
2019-07-01 23:00:26 +02:00
IRC specific : Nick collision preprocessor for user introductions .
If the given nick matches an existing UID , send out a SAVE hook payload indicating a nick collision .
2017-07-31 14:58:02 +02:00
"""
uid = self . nick_to_uid ( nick )
# If there is a nick collision, we simply alert plugins. Relay will purposely try to
# lose fights and tag nicks instead, while other plugins can choose how to handle this.
if uid :
log . info ( ' ( %s ) Nick collision on %s / %s , forwarding this to plugins ' , self . name ,
uid , nick )
self . call_hooks ( [ self . sid , ' SAVE ' , { ' target ' : uid } ] )
2019-12-23 07:11:31 +01:00
def _expandPUID ( self , entityid ) :
2017-08-30 09:56:18 +02:00
"""
Returns the nick or server name for the given UID / SID . This method helps support protocol
modules that use PUIDs internally , as they must convert them to talk with the uplink .
"""
# TODO: stop hardcoding @ as separator
2019-12-23 07:11:31 +01:00
if isinstance ( entityid , str ) and ' @ ' in entityid :
name = self . get_friendly_name ( entityid )
log . debug ( ' ( %s ) _expandPUID: mangling pseudo ID %s to %s ' , self . name , entityid , name )
return name
return entityid # Regular UID/SID, no change
2017-08-30 09:56:18 +02:00
2019-02-10 22:00:53 +01:00
def wrap_message ( self , source , target , text ) :
2018-05-11 23:38:21 +02:00
"""
Wraps the given message text into multiple lines ( length depends on how much the protocol
allows ) , and returns these as a list .
"""
# This is protocol specific, so stub it here in the base class.
raise NotImplementedError
2018-06-14 09:41:00 +02:00
# When this many pings in a row are missed, the ping timer loop will force a disconnect on the
# next cycle. Effectively the ping timeout is: pingfreq * (KEEPALIVE_MAX_MISSED + 1)
KEEPALIVE_MAX_MISSED = 2

class IRCNetwork(PyLinkNetworkCoreWithUtils):
    """
    Network core for IRC-style socket connections: adds socket/TLS setup and
    verification, ping scheduling, reconnection handling and a queued send loop
    on top of PyLinkNetworkCoreWithUtils.
    """

    # Maximum length (in bytes) of one outgoing server-to-server line, excluding the
    # trailing CRLF; _send() truncates anything longer when this is > 0.
    S2S_BUFSIZE = 510
2017-06-28 00:58:38 +02:00
def __init__ ( self , * args , * * kwargs ) :
super ( ) . __init__ ( * args , * * kwargs )
2017-07-13 07:56:30 +02:00
self . _queue = None
self . _ping_timer = None
self . _socket = None
2019-07-01 23:10:54 +02:00
self . _buffer = bytearray ( )
2018-03-23 01:42:28 +01:00
self . _reconnect_thread = None
2018-04-08 06:46:05 +02:00
self . _queue_thread = None
2017-06-28 00:58:38 +02:00
2017-07-14 14:22:05 +02:00
def _init_vars ( self , * args , * * kwargs ) :
super ( ) . _init_vars ( * args , * * kwargs )
2017-06-28 01:05:46 +02:00
# Set IRC specific variables for ping checking and queuing
2018-06-14 09:41:00 +02:00
self . lastping = time . time ( ) # This actually tracks the last message received as of 2.0-alpha4
2017-06-28 01:05:46 +02:00
self . pingfreq = self . serverdata . get ( ' pingfreq ' ) or 90
self . maxsendq = self . serverdata . get ( ' maxsendq ' , 4096 )
2017-07-13 07:56:30 +02:00
self . _queue = queue . Queue ( self . maxsendq )
2017-06-28 01:05:46 +02:00
2017-06-27 11:53:09 +02:00
def _schedule_ping ( self ) :
2017-06-16 06:31:03 +02:00
""" Schedules periodic pings in a loop. """
2017-07-05 07:09:50 +02:00
self . _ping_uplink ( )
2016-05-01 01:33:46 +02:00
2018-04-08 06:46:05 +02:00
if self . _aborted . is_set ( ) :
return
2018-06-14 09:41:00 +02:00
elapsed = time . time ( ) - self . lastping
if elapsed > ( self . pingfreq * KEEPALIVE_MAX_MISSED ) :
log . error ( ' ( %s ) Disconnected from IRC: Ping timeout ( %d secs) ' , self . name , elapsed )
self . disconnect ( )
return
2017-07-13 07:56:30 +02:00
self . _ping_timer = threading . Timer ( self . pingfreq , self . _schedule_ping )
self . _ping_timer . daemon = True
self . _ping_timer . name = ' Ping timer loop for %s ' % self . name
self . _ping_timer . start ( )
2016-07-01 04:52:06 +02:00
2017-06-16 06:31:03 +02:00
log . debug ( ' ( %s ) Ping scheduled at %s ' , self . name , time . time ( ) )
2017-07-13 07:02:40 +02:00
def _log_connection_error ( self , * args , * * kwargs ) :
# Log connection errors to ERROR unless were shutting down (in which case,
# the given text goes to DEBUG).
2017-08-31 22:36:46 +02:00
if self . _aborted . is_set ( ) or world . shutting_down . is_set ( ) :
2017-07-13 07:02:40 +02:00
log . debug ( * args , * * kwargs )
else :
log . error ( * args , * * kwargs )
2018-06-14 07:52:37 +02:00
def _make_ssl_context ( self ) :
"""
Returns a ssl . SSLContext instance appropriate for this connection .
"""
2018-06-16 00:27:42 +02:00
context = ssl . create_default_context ( )
# Use the ssl-should-verify protocol capability to determine whether we should
# accept invalid certs by default. Generally, cert validation is OFF for server protocols
# and ON for client-based protocols like clientbot
if self . serverdata . get ( ' ssl_accept_invalid_certs ' , not self . has_cap ( " ssl-should-verify " ) ) :
# Note: check_hostname has to be off to set verify_mode to CERT_NONE,
# since it's possible for the remote link to not provide a cert at all
context . check_hostname = False
context . verify_mode = ssl . CERT_NONE
else :
# Otherwise, only check cert hostname if the target is a hostname OR we have
# ssl-should-verify defined
context . check_hostname = self . serverdata . get ( ' ssl_validate_hostname ' ,
self . has_cap ( " ssl-should-verify " ) or
2019-12-23 08:11:58 +01:00
utils . get_hostname_type ( self . serverdata [ ' ip ' ] ) == 0 )
2018-06-14 07:52:37 +02:00
return context
def _setup_ssl ( self ) :
"""
Initializes SSL / TLS for this network .
"""
log . info ( ' ( %s ) Using TLS/SSL for this connection... ' , self . name )
certfile = self . serverdata . get ( ' ssl_certfile ' )
keyfile = self . serverdata . get ( ' ssl_keyfile ' )
context = self . _make_ssl_context ( )
# Cert and key files are optional, load them if specified.
if certfile and keyfile :
try :
context . load_cert_chain ( certfile , keyfile )
except OSError :
log . exception ( ' ( %s ) Caught OSError trying to initialize the SSL connection; '
' are " ssl_certfile " and " ssl_keyfile " set correctly? ' ,
self . name )
raise
2018-06-15 11:47:12 +02:00
self . _socket = context . wrap_socket ( self . _socket , server_hostname = self . serverdata . get ( ' ip ' ) )
2018-06-14 07:52:37 +02:00
def _verify_ssl ( self ) :
"""
2018-06-16 00:57:45 +02:00
Implements additional SSL / TLS verifications ( so far , only certificate fingerprints when enabled ) .
2018-06-14 07:52:37 +02:00
"""
peercert = self . _socket . getpeercert ( binary_form = True )
# Hash type is configurable using the ssl_fingerprint_type
# value, and defaults to sha256.
hashtype = self . serverdata . get ( ' ssl_fingerprint_type ' , ' sha256 ' ) . lower ( )
try :
hashfunc = getattr ( hashlib , hashtype )
except AttributeError :
raise conf . ConfigurationError ( ' Unsupported or invalid TLS/SSL certificate fingerprint type %r ' ,
hashtype )
else :
expected_fp = self . serverdata . get ( ' ssl_fingerprint ' )
2018-06-16 00:50:32 +02:00
if expected_fp and peercert is None :
raise ssl . CertificateError ( ' TLS/SSL certificate fingerprint checking is enabled but the uplink '
' did not provide a certificate ' )
fp = hashfunc ( peercert ) . hexdigest ( )
2018-06-14 07:52:37 +02:00
if expected_fp :
if fp != expected_fp :
# SSL Fingerprint doesn't match; break.
2018-06-16 00:46:04 +02:00
raise ssl . CertificateError ( ' Uplink TLS/SSL certificate fingerprint ( %s : %r ) does not '
' match the one configured ( %s : %r ) ' % ( hashtype , fp , hashtype , expected_fp ) )
2018-06-14 07:52:37 +02:00
else :
log . info ( ' ( %s ) Uplink TLS/SSL certificate fingerprint '
2018-06-16 00:52:04 +02:00
' verified ( %s : %r ) ' , self . name , hashtype , fp )
2018-06-16 00:57:45 +02:00
elif hasattr ( self . _socket , ' context ' ) and self . _socket . context . verify_mode == ssl . CERT_NONE :
2018-06-14 07:52:37 +02:00
log . info ( ' ( %s ) Uplink \' s TLS/SSL certificate fingerprint ( %s ) '
' is %r . You can enhance the security of your '
' link by specifying this in a " ssl_fingerprint " '
' option in your server block. ' , self . name ,
hashtype , fp )
2018-04-08 06:22:18 +02:00
    def _connect(self):
        """
        Connects to the network.

        Runs the full connection sequence: DNS resolution, socket creation/bind,
        optional TLS setup and verification, queue thread startup, and finally the
        protocol module's post_connect(). Any exception triggers disconnect().
        """
        self._pre_connect()

        remote = self.serverdata["ip"]
        port = self.serverdata["port"]
        try:
            if 'bindhost' in self.serverdata:
                # Try detecting the socket type from the bindhost if specified.
                force_ipv6 = utils.get_hostname_type(self.serverdata['bindhost']) == 2
            else:
                force_ipv6 = self.serverdata.get("ipv6")  # ternary value (None = use system default)

            # Translate the ternary into an address family for getaddrinfo().
            if force_ipv6 is True:
                dns_stype = socket.AF_INET6
            elif force_ipv6 is False:
                dns_stype = socket.AF_INET
            else:
                dns_stype = socket.AF_UNSPEC

            # Use only the first resolution result.
            dns_result = socket.getaddrinfo(remote, port, family=dns_stype)[0]
            ip = dns_result[-1][0]

            log.debug('(%s) Resolving address %s to %s (force_ipv6=%s)', self.name, remote, ip, force_ipv6)
            # Create the actual socket.
            self._socket = socket.socket(dns_result[0])

            # Set the socket bind if applicable.
            if 'bindhost' in self.serverdata:
                self._socket.bind((self.serverdata['bindhost'], 0))

            # Enable SSL if set to do so.
            self.ssl = self.serverdata.get('ssl')
            if self.ssl:
                self._setup_ssl()
            elif not ipaddress.ip_address(ip).is_loopback:
                log.warning('(%s) This connection will be made via plain text, which is vulnerable '
                            'to man-in-the-middle (MITM) attacks and passive eavesdropping. Consider '
                            'enabling TLS/SSL with either certificate validation or fingerprint '
                            'pinning to better secure your network traffic.', self.name)

            log.info("Connecting to network %r on %s:%s", self.name, ip, port)

            self._socket.settimeout(self.pingfreq)

            # Start the actual connection
            self._socket.connect((ip, port))

            # The network may have been removed while we were blocked in connect();
            # if so, tear the socket back down and bail out.
            if self not in world.networkobjects.values():
                log.debug("(%s) _connect: disconnecting socket %s as the network was removed",
                          self.name, self._socket)
                try:
                    self._socket.shutdown(socket.SHUT_RDWR)
                finally:
                    self._socket.close()
                return

            # Make sure future reads never block, since select doesn't always guarantee this.
            self._socket.setblocking(False)

            selectdriver.register(self)

            if self.ssl:
                self._verify_ssl()

            self._queue_thread = threading.Thread(name="Queue thread for %s" % self.name,
                                                  target=self._process_queue, daemon=True)
            self._queue_thread.start()

            self.sid = self.serverdata.get("sid")
            # All our checks passed, get the protocol module to connect and run the listen
            # loop. This also updates any SID values should the protocol module do so.
            self.post_connect()
            log.info('(%s) Enumerating our own SID %s', self.name, self.sid)
            host = self.hostname()

            self.servers[self.sid] = Server(self, None, host, internal=True,
                                            desc=self.serverdata.get('serverdesc')
                                            or conf.conf['pylink']['serverdesc'])

            log.info('(%s) Starting ping schedulers....', self.name)
            self._schedule_ping()
            log.info('(%s) Server ready; listening for data.', self.name)
            self.autoconnect_active_multiplier = 1  # Reset any extra autoconnect delays

        # _run_irc() or the protocol module it called raised an exception, meaning we've disconnected
        except:
            self._log_connection_error('(%s) Disconnected from IRC:', self.name, exc_info=True)
            if not self._aborted.is_set():
                self.disconnect()
2018-04-08 06:22:18 +02:00
def connect ( self ) :
"""
Starts a thread to connect the network .
"""
connect_thread = threading . Thread ( target = self . _connect , daemon = True ,
name = " Connect thread for %s " %
self . name )
connect_thread . start ( )
2017-06-16 06:31:03 +02:00
    def disconnect(self):
        """Handle disconnects from the remote server.

        Idempotent: returns immediately if a disconnect is already in progress.
        Tears down the send queue, socket, and ping timer, then schedules a
        reconnect via _start_reconnect().
        """
        if self._aborted.is_set():
            return

        self._pre_disconnect()

        # Stop the queue thread.
        # NOTE(review): None appears to act as a shutdown sentinel consumed by the
        # queue thread — confirm against _process_queue.
        if self._queue is not None:
            try:
                # XXX: queue.Queue.queue isn't actually documented, so this is probably not reliable in the long run.
                with self._queue.mutex:
                    self._queue.queue[0] = None
            except IndexError:
                self._queue.put(None)

        if self._socket is not None:
            try:
                selectdriver.unregister(self)
            except KeyError:
                # Socket was never registered with the select driver; nothing to do.
                pass
            try:
                log.debug('(%s) disconnect: shutting down read half of socket %s', self.name, self._socket)
                self._socket.shutdown(socket.SHUT_RD)
            except:
                log.debug('(%s) Error on socket shutdown:', self.name, exc_info=True)

            log.debug('(%s) disconnect: waiting for write half of socket %s to shutdown', self.name, self._socket)
            # Wait for the write half to shut down when applicable.
            if self._queue_thread is None or self._aborted_send.wait(10):
                log.debug('(%s) disconnect: closing socket %s', self.name, self._socket)
                self._socket.close()

        # Stop the ping timer.
        if self._ping_timer:
            log.debug('(%s) Canceling pingTimer at %s due to disconnect() call', self.name, time.time())
            self._ping_timer.cancel()

        # Drop any partially received line data.
        self._buffer.clear()

        self._post_disconnect()

        # Clear old sockets.
        self._socket = None

        self._start_reconnect()
2017-05-05 04:04:03 +02:00
2018-03-24 04:21:49 +01:00
    def _start_reconnect(self):
        """Schedules a reconnection to the network.

        No-op when the network was removed, or when a reconnect thread is already
        running (prevents stacking multiple reconnect attempts).
        """
        def _reconnect():
            # _run_autoconnect() will block and return True after the autoconnect
            # delay has passed, if autoconnect is disabled. We do not want it to
            # block whatever is calling disconnect() though, so we run it in a new
            # thread.
            # NOTE(review): "if autoconnect is disabled" reads inverted — presumably
            # it returns True when autoconnect IS enabled; confirm in _run_autoconnect().
            if self._run_autoconnect():
                self.connect()

        if self not in world.networkobjects.values():
            log.debug('(%s) _start_reconnect: Stopping reconnect timer as the network was removed', self.name)
            return
        elif self._reconnect_thread is None or not self._reconnect_thread.is_alive():
            self._reconnect_thread = threading.Thread(target=_reconnect, name="Reconnecting network %s" % self.name)
            self._reconnect_thread.start()
        else:
            log.debug('(%s) Ignoring attempt to reschedule reconnect as one is in progress.', self.name)
2018-03-17 19:01:32 +01:00
2017-07-31 05:09:08 +02:00
def handle_events ( self , line ) :
raise NotImplementedError
def parse_irc_command ( self , line ) :
""" Sends a command to the protocol module. """
log . debug ( " ( %s ) <- %s " , self . name , line )
2019-09-11 04:46:28 +02:00
if not line :
log . warning ( " ( %s ) Got empty line %r from IRC? " , self . name , line )
return
2017-07-31 05:09:08 +02:00
try :
hook_args = self . handle_events ( line )
except Exception :
log . exception ( ' ( %s ) Caught error in handle_events, disconnecting! ' , self . name )
log . error ( ' ( %s ) The offending line was: <- %s ' , self . name , line )
2017-08-06 07:14:44 +02:00
self . disconnect ( )
2017-07-31 05:09:08 +02:00
return
# Only call our hooks if there's data to process. Handlers that support
# hooks will return a dict of parsed arguments, which can be passed on
# to plugins and the like. For example, the JOIN handler will return
# something like: {'channel': '#whatever', 'users': ['UID1', 'UID2',
# 'UID3']}, etc.
if hook_args is not None :
self . call_hooks ( hook_args )
return hook_args
2017-06-27 10:44:26 +02:00
    def _run_irc(self):
        """
        Message handler, called when select() has data to read.

        Reads one chunk from the socket, appends it to the line buffer, and
        dispatches every complete \\n-terminated line to parse_irc_command().
        """
        if self._socket is None:
            log.debug('(%s) Ignoring attempt to read data because self._socket is None', self.name)
            return

        data = bytearray()
        try:
            data = self._socket.recv(2048)
        except (BlockingIOError, ssl.SSLWantReadError, ssl.SSLWantWriteError):
            # Non-blocking socket had nothing ready; select() will call us again.
            log.debug('(%s) No data to read, trying again later...', self.name, exc_info=True)
            return
        except OSError:
            # Suppress socket read warnings from lingering recv() calls if
            # we've been told to shutdown.
            if self._aborted.is_set():
                return
            raise

        self._buffer += data
        if not data:
            # recv() returning an empty result means the remote end closed the connection.
            self._log_connection_error('(%s) Connection lost, disconnecting.', self.name)
            self.disconnect()
            return

        # Process every complete line in the buffer; a trailing partial line
        # stays buffered until the next read.
        while b'\n' in self._buffer:
            line, self._buffer = self._buffer.split(b'\n', 1)
            line = line.strip(b'\r')
            line = line.decode(self.encoding, "replace")
            self.parse_irc_command(line)

        # Update the last message received time
        self.lastping = time.time()
2017-06-16 06:31:03 +02:00
def _send ( self , data ) :
""" Sends raw text to the uplink server. """
2020-09-29 20:49:43 +02:00
if self . _aborted . is_set ( ) or self . _socket is None :
2018-03-30 19:47:34 +02:00
log . debug ( " ( %s ) Not sending message %r since the connection is dead " , self . name , data )
return
2017-06-16 06:31:03 +02:00
# Safeguard against newlines in input!! Otherwise, each line gets
# treated as a separate command, which is particularly nasty.
data = data . replace ( ' \n ' , ' ' )
2017-07-17 03:46:56 +02:00
encoded_data = data . encode ( self . encoding , ' replace ' )
if self . S2S_BUFSIZE > 0 : # Apply message cutoff as needed
encoded_data = encoded_data [ : self . S2S_BUFSIZE ]
encoded_data + = b " \r \n "
2017-05-05 04:04:03 +02:00
2017-06-16 06:31:03 +02:00
log . debug ( " ( %s ) -> %s " , self . name , data )
2017-05-05 04:04:03 +02:00
2020-09-29 19:43:38 +02:00
while True :
try :
self . _socket . send ( encoded_data )
except ( BlockingIOError , ssl . SSLWantReadError , ssl . SSLWantWriteError ) :
# The send attempt failed, wait a little bit.
# I would prefer using a blocking socket and MSG_DONTWAIT in recv()'s flags
# but SSLSocket doesn't support that...
throttle_time = self . serverdata . get ( ' throttle_time ' , 0 )
if self . _aborted . wait ( throttle_time ) :
break
continue
except :
log . exception ( " ( %s ) Failed to send message %r ; aborting! " , self . name , data )
self . disconnect ( )
2021-01-10 19:28:34 +01:00
return
2020-09-29 19:43:38 +02:00
else :
break
2016-07-07 08:11:36 +02:00
2017-06-16 06:31:03 +02:00
def send ( self , data , queue = True ) :
""" send() wrapper with optional queueing support. """
2017-08-06 07:16:39 +02:00
if self . _aborted . is_set ( ) :
log . debug ( ' ( %s ) refusing to queue data %r as self._aborted is set ' , self . name , data )
2017-06-16 06:31:03 +02:00
return
if queue :
# XXX: we don't really know how to handle blocking queues yet, so
# it's better to not expose that yet.
2018-10-09 01:25:53 +02:00
try :
self . _queue . put_nowait ( data )
except QUEUE_FULL :
log . error ( ' ( %s ) Max SENDQ exceeded ( %s ), disconnecting! ' , self . name , self . _queue . maxsize )
self . disconnect ( )
raise
2017-06-16 06:31:03 +02:00
else :
self . _send ( data )
2017-05-05 04:04:03 +02:00
2017-06-27 11:53:09 +02:00
    def _process_queue(self):
        """Loop to process outgoing queue data.

        Runs in its own thread: repeatedly waits throttle_time seconds on the
        abort flag, then pops one item off the sendq and writes it out.
        Exits when aborted, when a None sentinel is queued, or when this
        network object has gone stale.
        """
        while True:
            # Re-read throttle_time each iteration so config changes apply live.
            throttle_time = self.serverdata.get('throttle_time', 0)
            if not self._aborted.wait(throttle_time):
                # Blocks until an item is available.
                data = self._queue.get()

                if data is None:
                    # None is the explicit "stop this thread" sentinel.
                    log.debug('(%s) Stopping queue thread due to getting None as item', self.name)
                    break
                elif self not in world.networkobjects.values():
                    log.debug('(%s) Stopping stale queue thread; no longer matches world.networkobjects', self.name)
                    break
                elif self._aborted.is_set():
                    # The _aborted flag may have changed while we were waiting for an item,
                    # so check for it again.
                    log.debug('(%s) Stopping queue thread since the connection is dead', self.name)
                    break
                elif data:
                    self._send(data)
            else:
                # wait() returned True: the abort flag was set during the
                # throttle delay, so stop processing.
                break

        # Once we're done here, shut down the write part of the socket.
        if self._socket:
            log.debug('(%s) _process_queue: shutting down write half of socket %s', self.name, self._socket)
            self._socket.shutdown(socket.SHUT_WR)

        # Signal disconnect() that the send side has fully wound down.
        self._aborted_send.set()
2018-05-11 23:38:21 +02:00
def wrap_message ( self , source , target , text ) :
"""
Wraps the given message text into multiple lines , and returns these as a list .
For IRC , the maximum length of one message is calculated as S2S_BUFSIZE ( default to 510 )
minus the length of " :sender-nick!sender-user@sender-host PRIVMSG #target : "
"""
2018-05-19 04:09:06 +02:00
# We explicitly want wrapping (e.g. for messages eventually making its way to a user), so
# use the default bufsize of 510 even if the IRCd's S2S protocol allows infinitely long
# long messages.
bufsize = self . S2S_BUFSIZE or IRCNetwork . S2S_BUFSIZE
2018-05-11 23:46:47 +02:00
try :
target = self . get_friendly_name ( target )
except KeyError :
log . warning ( ' ( %s ) Possible desync? Error while expanding wrap_message target %r '
' (source= %s ) ' , self . name , target , source , exc_info = True )
2018-05-11 23:38:21 +02:00
prefixstr = " : %s PRIVMSG %s : " % ( self . get_hostmask ( source ) , target )
2018-05-19 04:09:06 +02:00
maxlen = bufsize - len ( prefixstr )
2018-05-11 23:38:21 +02:00
2018-05-19 04:09:06 +02:00
log . debug ( ' ( %s ) wrap_message: length of prefix %r is %s , bufsize= %s , maxlen= %s ' ,
self . name , prefixstr , len ( prefixstr ) , bufsize , maxlen )
if maxlen < = 0 :
log . error ( ' ( %s ) Got invalid maxlen %s for wrap_message ( %s -> %s ) ' , self . name , maxlen ,
source , target )
return [ text ]
2018-05-11 23:38:21 +02:00
return textwrap . wrap ( text , width = maxlen )
2017-06-16 06:55:08 +02:00
Irc = IRCNetwork  # Compatibility alias (PyLink 1.x naming)
2016-07-07 08:11:36 +02:00
2017-06-16 06:54:40 +02:00
class Server():
    """Represents one IRC server tracked by PyLink.

    Attributes:
        uplink: SID of this server's uplink; None for **both** the main
                PyLink server and our uplink.
        name: the server name (lowercased when given as a string).
        internal: whether this is an internal PyLink server.
        desc: the server description, if relevant.
        users: set of UIDs of users residing on this server.
        hopcount: distance from the main PyLink server (1 for ourselves /
                  our uplink, uplink's hopcount + 1 otherwise).
        has_eob: whether this server has finished bursting.
    """

    def __init__(self, irc, uplink, name, internal=False, desc="(None given)"):
        self._irc = irc
        self.uplink = uplink
        self.internal = internal
        self.desc = desc
        self.users = set()

        # Server names are case insensitive on IRC, so normalize strings.
        self.name = name.lower() if isinstance(name, str) else name

        assert uplink is None or uplink in self._irc.servers, "Unknown uplink %s" % uplink

        # We (and our direct uplink) sit at hop 1; everything else is one hop
        # further out than its uplink.
        self.hopcount = 1 if uplink is None else self._irc.servers[uplink].hopcount + 1

        # Has the server finished bursting yet?
        self.has_eob = False

    def __repr__(self):
        return 'Server(%s)' % self.name
2017-08-25 22:53:45 +02:00
2017-06-16 06:54:40 +02:00
IrcServer = Server  # Compatibility alias (PyLink 1.x naming)
2015-06-07 08:04:11 +02:00
2018-05-26 10:00:04 +02:00
class Channel(TSObject, structures.CamelCaseToSnakeCase, structures.CopyWrapper):
    """PyLink IRC channel class.

    Tracks the user list, modes, topic, and named prefix mode state
    (owner/admin/op/halfop/voice) of one channel on one network.
    """

    def __init__(self, irc, name=None):
        super().__init__()

        # Initialize variables, such as the topic, user list, TS, who's opped, etc.
        self.users = set()
        self.modes = set()
        self.topic = ''
        self.prefixmodes = {'op': set(), 'halfop': set(), 'voice': set(),
                            'owner': set(), 'admin': set()}
        self._irc = irc

        # Determines whether a topic has been set here or not. Protocol modules
        # should set this.
        self.topicset = False

        # Saves the channel name (may be useful to plugins, etc.)
        self.name = name

    def __repr__(self):
        return 'Channel(%s)' % self.name

    def remove_user(self, target):
        """Removes a user from a channel."""
        # Drop the user from every prefix mode list as well as the user list.
        for s in self.prefixmodes.values():
            s.discard(target)
        self.users.discard(target)
    removeuser = remove_user

    def is_voice(self, uid):
        """Returns whether the given user is voice in the channel."""
        return uid in self.prefixmodes['voice']

    def is_halfop(self, uid):
        """Returns whether the given user is halfop in the channel."""
        return uid in self.prefixmodes['halfop']

    def is_op(self, uid):
        """Returns whether the given user is op in the channel."""
        return uid in self.prefixmodes['op']

    def is_admin(self, uid):
        """Returns whether the given user is admin (&) in the channel."""
        return uid in self.prefixmodes['admin']

    def is_owner(self, uid):
        """Returns whether the given user is owner (~) in the channel."""
        return uid in self.prefixmodes['owner']

    def is_voice_plus(self, uid):
        """Returns whether the given user is voice or above in the channel."""
        # Consistency fix: is_halfop_plus() and is_op_plus() return False for
        # users not in the channel, but this method previously raised KeyError
        # via get_prefix_modes(). Treat absent users as unprivileged instead.
        if uid not in self.users:
            return False
        # If the user has any prefix mode, it has to be voice or greater.
        return bool(self.get_prefix_modes(uid))

    def is_halfop_plus(self, uid):
        """Returns whether the given user is halfop or above in the channel."""
        return any(uid in self.prefixmodes[mode]
                   for mode in ('halfop', 'op', 'admin', 'owner'))

    def is_op_plus(self, uid):
        """Returns whether the given user is op or above in the channel."""
        return any(uid in self.prefixmodes[mode]
                   for mode in ('op', 'admin', 'owner'))

    @staticmethod
    def sort_prefixes(key):
        """
        Returns a numeric value for a named prefix mode: higher ranks have lower values
        (sorted first), and lower ranks have higher values (sorted last).

        This function essentially implements a sorted() key function for named prefix modes.
        """
        values = {'owner': 0, 'admin': 100, 'op': 200, 'halfop': 300, 'voice': 500}

        # Default to highest value (1000) for unknown modes, should they appear.
        return values.get(key, 1000)

    def get_prefix_modes(self, uid, prefixmodes=None):
        """
        Returns a list of all named prefix modes the user has in the channel, in
        decreasing order from owner to voice.

        Optionally, a prefixmodes argument can be given to look at an earlier state of
        the channel's prefix modes mapping, e.g. for checking the op status of a mode
        setter before their modes are processed and added to the channel state.

        Raises KeyError if the user is not in the channel.
        """
        if uid not in self.users:
            raise KeyError("User %s does not exist or is not in the channel" % uid)

        prefixmodes = prefixmodes or self.prefixmodes

        result = [mode for mode, modelist in prefixmodes.items() if uid in modelist]
        return sorted(result, key=self.sort_prefixes)
2017-06-16 06:54:40 +02:00
IrcChannel = Channel  # Compatibility alias (PyLink 1.x naming)
2017-08-29 04:42:10 +02:00
class PUIDGenerator():
    """
    Pseudo UID Generator module, using a prefix and a simple counter.

    Each generated ID has the form "<prefix>@<counter>".
    """

    def __init__(self, prefix, start=0):
        # Default prefix used when next_uid() isn't given one explicitly.
        self.prefix = prefix
        # Monotonically increasing counter shared by all IDs from this generator.
        self.counter = start

    def next_uid(self, prefix=''):
        """
        Generates the next PUID, incrementing the internal counter.
        """
        effective_prefix = prefix or self.prefix
        uid = '%s@%s' % (effective_prefix, self.counter)
        self.counter += 1
        return uid

    # Pseudo SIDs come from the same counter.
    next_sid = next_uid