Unverified Commit 60b07e92 authored by Steven Allen, committed by GitHub

feat: simplify broadcast cancel logic (#399)

Instead of tracking offsets, just create a "new" slice starting with the
broadcast cancel slice. Under the covers, this will just use the same memory
over and over.
parent 6d9c17eb
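The "same memory over and over" point relies on Go's slice/backing-array sharing: each per-peer slice starts as a copy of the broadcast-cancel slice header, and appends land in the spare capacity of the one buffer allocated up front, so no per-peer allocation or copying of the broadcast prefix is needed. Below is a minimal, standalone sketch of that behavior; the names and string keys are illustrative only (the actual code uses cid.Cid and the variables shown in the diff).

```go
package main

import "fmt"

func main() {
	// One buffer with room for every possible cancel key, but only the
	// broadcast cancels appended so far.
	broadcast := make([]string, 0, 8)
	broadcast = append(broadcast, "bcast-1", "bcast-2")

	// Per "peer", start a new slice header at the broadcast prefix and
	// append that peer's extra cancels. Because the backing array still has
	// spare capacity, each append writes into the same memory region,
	// overwriting the previous peer's extras instead of allocating.
	for _, extra := range [][]string{{"a", "b"}, {"c"}} {
		toCancel := broadcast                 // shares broadcast's backing array
		toCancel = append(toCancel, extra...) // fills the spare capacity in place
		fmt.Println(toCancel, cap(toCancel))
	}
}
```

This only works safely because each per-peer slice is consumed before the next one is built; if the per-peer slices were retained concurrently they would alias each other.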
@@ -200,20 +200,17 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) {
 	// Create a buffer to use for filtering cancels per peer, with the
 	// broadcast wants at the front of the buffer (broadcast wants are sent to
 	// all peers)
-	i := 0
-	cancelsBuff := make([]cid.Cid, len(cancelKs))
+	broadcastCancels := make([]cid.Cid, 0, len(cancelKs))
 	for _, c := range cancelKs {
 		if pwm.broadcastWants.Has(c) {
-			cancelsBuff[i] = c
-			i++
+			broadcastCancels = append(broadcastCancels, c)
 		}
 	}
-	broadcastKsCount := i
 
 	// Send cancels to a particular peer
 	send := func(p peer.ID, pws *peerWant) {
-		// Start the index into the buffer after the broadcast wants
-		i = broadcastKsCount
+		// Start from the broadcast cancels
+		toCancel := broadcastCancels
 
 		// For each key to be cancelled
 		for _, c := range cancelKs {
@@ -235,18 +232,17 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) {
 			// If it's a broadcast want, we've already added it to
 			// the peer cancels.
 			if !pwm.broadcastWants.Has(c) {
-				cancelsBuff[i] = c
-				i++
+				toCancel = append(toCancel, c)
 			}
 		}
 
 		// Send cancels to the peer
-		if i > 0 {
-			pws.peerQueue.AddCancels(cancelsBuff[:i])
+		if len(toCancel) > 0 {
+			pws.peerQueue.AddCancels(toCancel)
 		}
 	}
 
-	if broadcastKsCount > 0 {
+	if len(broadcastCancels) > 0 {
 		// If a broadcast want is being cancelled, send the cancel to all
 		// peers
 		for p, pws := range pwm.peerWants {
@@ -273,7 +269,7 @@ func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) {
 	}
 
 	// Remove cancelled broadcast wants
-	for _, c := range cancelsBuff[:broadcastKsCount] {
+	for _, c := range broadcastCancels {
 		pwm.broadcastWants.Remove(c)
 	}