// Package kbucket implements a Kademlia 'k-bucket' routing table.
package kbucket

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/peerstore"

	logging "github.com/ipfs/go-log"
	mh "github.com/multiformats/go-multihash"
)

var log = logging.Logger("table")

var ErrPeerRejectedHighLatency = errors.New("peer rejected; latency too high")
var ErrPeerRejectedNoCapacity = errors.New("peer rejected; insufficient capacity")

// maxCplForRefresh is the maximum cpl we support for refresh.
// This limit exists because we can only generate 'maxCplForRefresh' bit prefixes for now.
const maxCplForRefresh uint = 15

// CplRefresh contains a CPL (common prefix length) with the host & the last time
// we refreshed that cpl/searched for an ID which has that cpl with the host.
type CplRefresh struct {
	Cpl           uint
	LastRefreshAt time.Time
}

// RoutingTable defines the routing table.
type RoutingTable struct {
	// ID of the local peer
	local ID

	// Blanket lock, refine later for better performance
	tabLock sync.RWMutex

	// latency metrics
	metrics peerstore.Metrics

	// Maximum acceptable latency for peers in this cluster
	maxLatency time.Duration

	// kBuckets define all the fingers to other nodes.
	Buckets    []*Bucket
	bucketsize int

	cplRefreshLk   sync.RWMutex
	cplRefreshedAt map[uint]time.Time

	// notification functions
	PeerRemoved func(peer.ID)
	PeerAdded   func(peer.ID)
}

// NewRoutingTable creates a new routing table with a given bucketsize, local ID, and latency tolerance.
func NewRoutingTable(bucketsize int, localID ID, latency time.Duration, m peerstore.Metrics) *RoutingTable {
	rt := &RoutingTable{
		Buckets:        []*Bucket{newBucket()},
		bucketsize:     bucketsize,
		local:          localID,
		maxLatency:     latency,
		metrics:        m,
		cplRefreshedAt: make(map[uint]time.Time),
		PeerRemoved:    func(peer.ID) {},
		PeerAdded:      func(peer.ID) {},
	}

	return rt
}

// GetTrackedCplsForRefresh returns the CPLs we are tracking for refresh.
// The caller is free to modify the returned slice as it is a defensive copy.
func (rt *RoutingTable) GetTrackedCplsForRefresh() []CplRefresh {
	rt.cplRefreshLk.RLock()
	defer rt.cplRefreshLk.RUnlock()

	cpls := make([]CplRefresh, len(rt.cplRefreshedAt))

	i := 0
	for c, t := range rt.cplRefreshedAt {
		cpls[i] = CplRefresh{c, t}
		i++
	}

	return cpls
}

// GenRandPeerID generates a random peerID for a given Cpl
func (rt *RoutingTable) GenRandPeerID(targetCpl uint) (peer.ID, error) {
	if targetCpl > maxCplForRefresh {
		return "", fmt.Errorf("cannot generate peer ID for Cpl greater than %d", maxCplForRefresh)
	}

	localPrefix := binary.BigEndian.Uint16(rt.local)

	// For host with ID `L`, an ID `K` belongs to a bucket with ID `B` ONLY IF CommonPrefixLen(L,K) is EXACTLY B.
	// Hence, to achieve a targetPrefix `T`, we must toggle the (T+1)th bit in L & then copy (T+1) bits from L
	// to our randomly generated prefix.
	toggledLocalPrefix := localPrefix ^ (uint16(0x8000) >> targetCpl)
	randPrefix := uint16(rand.Uint32())

	// Combine the toggled local prefix and the random bits at the correct offset
	// such that ONLY the first `targetCpl` bits match the local ID.
	mask := (^uint16(0)) << (16 - (targetCpl + 1))
	targetPrefix := (toggledLocalPrefix & mask) | (randPrefix & ^mask)

	// Convert to a known peer ID.
	key := keyPrefixMap[targetPrefix]
	id := [34]byte{mh.SHA2_256, 32}
	binary.BigEndian.PutUint32(id[2:], key)
	return peer.ID(id[:]), nil
}
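
// refreshSketch is a minimal usage sketch, not part of the public API: it
// shows how a periodic refresh routine might combine GetTrackedCplsForRefresh
// and GenRandPeerID to produce lookup targets for every tracked CPL. The
// `lookup` callback is a hypothetical stand-in for an actual DHT query.
func refreshSketch(rt *RoutingTable, lookup func(peer.ID)) {
	for _, cpl := range rt.GetTrackedCplsForRefresh() {
		// GenRandPeerID only supports CPLs up to maxCplForRefresh.
		if cpl.Cpl > maxCplForRefresh {
			continue
		}
		p, err := rt.GenRandPeerID(cpl.Cpl)
		if err != nil {
			log.Debugf("refresh: could not generate peer ID for cpl %d: %s", cpl.Cpl, err)
			continue
		}
		// Searching for an ID with exactly this CPL repopulates the
		// corresponding bucket.
		lookup(p)
	}
}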

// ResetCplRefreshedAtForID resets the refresh time for the Cpl of the given ID.
func (rt *RoutingTable) ResetCplRefreshedAtForID(id ID, newTime time.Time) {
	cpl := CommonPrefixLen(id, rt.local)
	if uint(cpl) > maxCplForRefresh {
		return
	}

	rt.cplRefreshLk.Lock()
	defer rt.cplRefreshLk.Unlock()

	rt.cplRefreshedAt[uint(cpl)] = newTime
}

// Update adds or moves the given peer to the front of its respective bucket
func (rt *RoutingTable) Update(p peer.ID) (evicted peer.ID, err error) {
	peerID := ConvertPeerID(p)
	cpl := CommonPrefixLen(peerID, rt.local)

	rt.tabLock.Lock()
	defer rt.tabLock.Unlock()

	bucketID := cpl
	if bucketID >= len(rt.Buckets) {
		bucketID = len(rt.Buckets) - 1
	}

	bucket := rt.Buckets[bucketID]
	if bucket.Has(p) {
		// If the peer is already in the table, move it to the front.
		// This signifies that it is "more active"; less active nodes
		// will, as a result, tend towards the back of the list.
		bucket.MoveToFront(p)
		return "", nil
	}

	if rt.metrics.LatencyEWMA(p) > rt.maxLatency {
		// Connection doesn't meet requirements, skip!
		return "", ErrPeerRejectedHighLatency
	}

	// We have enough space in the bucket (whether spawned or grouped).
	if bucket.Len() < rt.bucketsize {
		bucket.PushFront(p)
		rt.PeerAdded(p)
		return "", nil
	}

	if bucketID == len(rt.Buckets)-1 {
		// if the bucket is too large and this is the last bucket (i.e. wildcard), unfold it.
		rt.nextBucket()
		// the structure of the table has changed, so let's recheck if the peer now has a dedicated bucket.
		bucketID = cpl
		if bucketID >= len(rt.Buckets) {
			bucketID = len(rt.Buckets) - 1
		}
		bucket = rt.Buckets[bucketID]
		if bucket.Len() >= rt.bucketsize {
			// if after all the unfolding, we're unable to find room for this peer, scrap it.
			return "", ErrPeerRejectedNoCapacity
		}
		bucket.PushFront(p)
		rt.PeerAdded(p)
		return "", nil
	}

	return "", ErrPeerRejectedNoCapacity
}

// Remove deletes a peer from the routing table. This is to be used
// when we are sure a node has disconnected completely.
func (rt *RoutingTable) Remove(p peer.ID) {
	peerID := ConvertPeerID(p)
	cpl := CommonPrefixLen(peerID, rt.local)

	rt.tabLock.Lock()
	defer rt.tabLock.Unlock()

	bucketID := cpl
	if bucketID >= len(rt.Buckets) {
		bucketID = len(rt.Buckets) - 1
	}

	bucket := rt.Buckets[bucketID]
	if bucket.Remove(p) {
		rt.PeerRemoved(p)
	}
}

func (rt *RoutingTable) nextBucket() {
	// This is the last bucket, which allegedly is a mixed bag containing peers not belonging in dedicated (unfolded) buckets.
	// _allegedly_ is used here to denote that *all* peers in the last bucket might feasibly belong to another bucket.
	// This could happen if e.g. we've unfolded 4 buckets, and all peers in folded bucket 5 really belong in bucket 8.
	bucket := rt.Buckets[len(rt.Buckets)-1]
	newBucket := bucket.Split(len(rt.Buckets)-1, rt.local)
	rt.Buckets = append(rt.Buckets, newBucket)

	// The newly formed bucket still contains too many peers. We probably just unfolded an empty bucket.
	if newBucket.Len() >= rt.bucketsize {
		// Keep unfolding the table until the last bucket is not overflowing.
		rt.nextBucket()
	}
}
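
// updateSketch is a minimal wiring sketch, not part of the public API: it
// shows how a host might feed connection events into Update and observe the
// table's add/remove hooks. The `connected` channel is a hypothetical event
// source supplied by the caller.
func updateSketch(rt *RoutingTable, connected <-chan peer.ID) {
	rt.PeerAdded = func(p peer.ID) { log.Debugf("routing table: added %s", p) }
	rt.PeerRemoved = func(p peer.ID) { log.Debugf("routing table: removed %s", p) }

	for p := range connected {
		if _, err := rt.Update(p); err != nil {
			// ErrPeerRejectedHighLatency and ErrPeerRejectedNoCapacity are
			// expected outcomes under load; the peer is simply not tracked.
			log.Debugf("not adding %s to routing table: %s", p, err)
		}
	}
}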

// Find a specific peer by ID, or return an empty ID if it is not in the table.
func (rt *RoutingTable) Find(id peer.ID) peer.ID {
	srch := rt.NearestPeers(ConvertPeerID(id), 1)
	if len(srch) == 0 || srch[0] != id {
		return ""
	}
	return srch[0]
}

// NearestPeer returns a single peer that is nearest to the given ID
func (rt *RoutingTable) NearestPeer(id ID) peer.ID {
	peers := rt.NearestPeers(id, 1)
	if len(peers) > 0 {
		return peers[0]
	}

	log.Debugf("NearestPeer: Returning nil, table size = %d", rt.Size())
	return ""
}

// NearestPeers returns a list of the 'count' closest peers to the given ID
func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID {
	// This is the number of bits _we_ share with the key. All peers in this
	// bucket share cpl bits with us and will therefore share at least cpl+1
	// bits with the given key. +1 because both the target and all peers in
	// this bucket differ from us in the cpl bit.
	cpl := CommonPrefixLen(id, rt.local)

	// It's assumed that this also protects the buckets.
	rt.tabLock.RLock()

	// Get bucket index or last bucket
	if cpl >= len(rt.Buckets) {
		cpl = len(rt.Buckets) - 1
	}

	pds := peerDistanceSorter{
		peers:  make([]peerDistance, 0, count+rt.bucketsize),
		target: id,
	}

	// Add peers from the target bucket (cpl+1 shared bits).
	pds.appendPeersFromList(rt.Buckets[cpl].list)

	// If we're short, add peers from buckets to the right until we have
	// enough. All buckets to the right share exactly cpl bits (as opposed
	// to the cpl+1 bits shared by the peers in the cpl bucket).
	//
	// Unfortunately, to be completely correct, we can't just take from
	// buckets until we have enough peers, because _all_ of these peers
	// will be ~2**(256-cpl) from us.
	//
	// However, we're going to do that anyway as it's "good enough"
	for i := cpl + 1; i < len(rt.Buckets) && pds.Len() < count; i++ {
		pds.appendPeersFromList(rt.Buckets[i].list)
	}

	// If we're still short, add in buckets that share _fewer_ bits. We can
	// do this bucket by bucket because each bucket will share 1 fewer bit
	// than the last.
	//
	// * bucket cpl-1: cpl-1 shared bits.
	// * bucket cpl-2: cpl-2 shared bits.
	// ...
	for i := cpl - 1; i >= 0 && pds.Len() < count; i-- {
		pds.appendPeersFromList(rt.Buckets[i].list)
	}
	rt.tabLock.RUnlock()

	// Sort by distance to local peer
	pds.sort()

	if count < pds.Len() {
		pds.peers = pds.peers[:count]
	}

	out := make([]peer.ID, 0, pds.Len())
	for _, p := range pds.peers {
		out = append(out, p.p)
	}

	return out
}

// Size returns the total number of peers in the routing table
func (rt *RoutingTable) Size() int {
	var tot int
	rt.tabLock.RLock()
	for _, buck := range rt.Buckets {
		tot += buck.Len()
	}
	rt.tabLock.RUnlock()
	return tot
}

// ListPeers takes a RoutingTable and returns a list of all peers from all buckets in the table.
func (rt *RoutingTable) ListPeers() []peer.ID {
	var peers []peer.ID
	rt.tabLock.RLock()
	for _, buck := range rt.Buckets {
		peers = append(peers, buck.Peers()...)
	}
	rt.tabLock.RUnlock()
	return peers
}

// Print prints a descriptive statement about the provided RoutingTable
func (rt *RoutingTable) Print() {
	fmt.Printf("Routing Table, bs = %d, Max latency = %d\n", rt.bucketsize, rt.maxLatency)
	rt.tabLock.RLock()

	for i, b := range rt.Buckets {
		fmt.Printf("\tbucket: %d\n", i)

		b.lk.RLock()
		for e := b.list.Front(); e != nil; e = e.Next() {
			p := e.Value.(peer.ID)
			fmt.Printf("\t\t- %s %s\n", p.Pretty(), rt.metrics.LatencyEWMA(p).String())
		}
		b.lk.RUnlock()
	}
	rt.tabLock.RUnlock()
}
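
// nearestPeersSketch is a minimal lookup-seeding sketch, not part of the
// public API: it shows how a query might seed itself with the closest known
// peers to a string key. ConvertKey is this package's existing helper for
// hashing a key into the XOR keyspace used by the table.
func nearestPeersSketch(rt *RoutingTable, key string) []peer.ID {
	// A full bucket's worth of candidates is the usual Kademlia choice.
	return rt.NearestPeers(ConvertKey(key), rt.bucketsize)
}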