mirror of https://github.com/ergochat/ergo.git synced 2024-11-22 20:09:41 +01:00

Merge pull request #160 from slingamn/cache.4

fix a race in regenerateMembersCache
Shivaram Lingamneni 2017-11-08 22:18:25 -05:00 committed by GitHub
commit d5832bf765


@@ -19,20 +19,21 @@ import (
 // Channel represents a channel that clients can join.
 type Channel struct {
 	flags             ModeSet
 	lists             map[Mode]*UserMaskSet
 	key               string
 	members           MemberSet
 	membersCache      []*Client  // allow iteration over channel members without holding the lock
+	membersCacheMutex sync.Mutex // tier 2; see `regenerateMembersCache`
 	name              string
 	nameCasefolded    string
 	server            *Server
 	createdTime       time.Time
 	stateMutex        sync.RWMutex
 	topic             string
 	topicSetBy        string
 	topicSetTime      time.Time
 	userLimit         uint64
 }

 // NewChannel creates a new channel from a `Server` and a `name`
@@ -67,10 +68,15 @@ func NewChannel(s *Server, name string, addDefaultModes bool) *Channel {
 }

 func (channel *Channel) regenerateMembersCache() {
-	// this is eventually consistent even without holding the writable Lock()
+	// this is eventually consistent even without holding stateMutex.Lock()
 	// throughout the update; all updates to `members` while holding Lock()
 	// have a serial order, so the call to `regenerateMembersCache` that
-	// happens-after the last one will see *all* the updates
+	// happens-after the last one will see *all* the updates. then,
+	// `membersCacheMutex` ensures that this final read is correctly paired
+	// with the final write to `membersCache`.
+	channel.membersCacheMutex.Lock()
+	defer channel.membersCacheMutex.Unlock()
+
 	channel.stateMutex.RLock()
 	result := make([]*Client, len(channel.members))
 	i := 0
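
The hunk is truncated after `i := 0`. As a minimal, self-contained sketch of the locking pattern this change establishes (not the verbatim ergo code: the `MemberSet` definition, the loop body past `i := 0`, and the final publication of the cache under `stateMutex.Lock()` are assumptions here), the regenerated slice is built from a read-locked snapshot of `members` and then published, with `membersCacheMutex` serializing concurrent regenerations:

package main

import (
	"fmt"
	"sync"
)

// Simplified stand-ins for ergo's real types (assumptions for this sketch).
type Client struct{ nick string }
type MemberSet map[*Client]bool

type Channel struct {
	stateMutex        sync.RWMutex
	membersCacheMutex sync.Mutex // serializes regenerations of membersCache
	members           MemberSet
	membersCache      []*Client
}

func (channel *Channel) regenerateMembersCache() {
	// Serialize regenerations: the goroutine that writes membersCache last is
	// also the one that read members last, so the cache cannot be left holding
	// a stale snapshot after concurrent joins/parts.
	channel.membersCacheMutex.Lock()
	defer channel.membersCacheMutex.Unlock()

	// Snapshot the member set under the read lock.
	channel.stateMutex.RLock()
	result := make([]*Client, len(channel.members))
	i := 0
	for member := range channel.members {
		result[i] = member
		i++
	}
	channel.stateMutex.RUnlock()

	// Publish the snapshot under the write lock.
	channel.stateMutex.Lock()
	channel.membersCache = result
	channel.stateMutex.Unlock()
}

func main() {
	alice, bob := &Client{"alice"}, &Client{"bob"}
	ch := &Channel{members: MemberSet{alice: true, bob: true}}
	ch.regenerateMembersCache()
	fmt.Println(len(ch.membersCache)) // 2
}

Without `membersCacheMutex`, two concurrent calls could interleave so that the call holding the older snapshot of `members` performs the final write, leaving `membersCache` stale; the extra mutex pairs the final read of `members` with the final write of `membersCache`, which is what the revised comment in the diff describes.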