3
0
mirror of https://github.com/ergochat/ergo.git synced 2024-11-29 15:40:02 +01:00

fix a race in regenerateMembersCache

The rationale for why regenerateMembersCache didn't need to hold the Lock()
throughout was subtly wrong. It is true that at least some attempt to
regenerate the cache would see *all* the updates. However, it was possible for
the value of `result` generated by that attempt to lose the race for the final
assignment `channel.membersCache = result`.

The fix is to serialize the attempts to regenerate the cache, without adding
any additional locking on the underlying `Channel` fields via
`Channel.stateMutex`. This ensures that the final read from `Channel.members`
is paired with the final write to `Channel.membersCache`.
This commit is contained in:
Shivaram Lingamneni 2017-11-07 14:38:18 -05:00
parent f9d8d1a4f9
commit 60b861e07e

View File

@ -24,6 +24,7 @@ type Channel struct {
key            string
members        MemberSet
membersCache   []*Client  // allow iteration over channel members without holding the lock
membersCacheMutex sync.Mutex // tier 2; see `regenerateMembersCache`
name           string
nameCasefolded string
server         *Server
@ -67,10 +68,15 @@ func NewChannel(s *Server, name string, addDefaultModes bool) *Channel {
}

func (channel *Channel) regenerateMembersCache() {
	// this is eventually consistent even without holding stateMutex.Lock()
	// throughout the update; all updates to `members` while holding Lock()
	// have a serial order, so the call to `regenerateMembersCache` that
	// happens-after the last one will see *all* the updates. then,
	// `membersCacheMutex` ensures that this final read is correctly paired
	// with the final write to `membersCache`.
	channel.membersCacheMutex.Lock()
	defer channel.membersCacheMutex.Unlock()
	channel.stateMutex.RLock()
	result := make([]*Client, len(channel.members))
	i := 0